Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r-- drivers/net/ethernet/3com/3c509.c | 8
-rw-r--r-- drivers/net/ethernet/3com/3c515.c | 6
-rw-r--r-- drivers/net/ethernet/3com/3c59x.c | 9
-rw-r--r-- drivers/net/ethernet/8390/ax88796.c | 6
-rw-r--r-- drivers/net/ethernet/aeroflex/greth.c | 3
-rw-r--r-- drivers/net/ethernet/agere/et131x.c | 2
-rw-r--r-- drivers/net/ethernet/allwinner/sun4i-emac.c | 2
-rw-r--r-- drivers/net/ethernet/amazon/ena/ena_admin_defs.h | 31
-rw-r--r-- drivers/net/ethernet/amazon/ena/ena_com.c | 85
-rw-r--r-- drivers/net/ethernet/amazon/ena/ena_com.h | 10
-rw-r--r-- drivers/net/ethernet/amazon/ena/ena_eth_com.c | 5
-rw-r--r-- drivers/net/ethernet/amazon/ena/ena_ethtool.c | 11
-rw-r--r-- drivers/net/ethernet/amazon/ena/ena_netdev.c | 306
-rw-r--r-- drivers/net/ethernet/amazon/ena/ena_netdev.h | 30
-rw-r--r-- drivers/net/ethernet/amazon/ena/ena_regs_defs.h | 34
-rw-r--r-- drivers/net/ethernet/amd/pcnet32.c | 5
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-common.h | 53
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-desc.c | 94
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-dev.c | 244
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 252
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c | 2
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-i2c.c | 30
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-main.c | 14
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-mdio.c | 33
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-pci.c | 14
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c | 240
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-platform.c | 10
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-ptp.c | 2
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe.h | 56
-rw-r--r-- drivers/net/ethernet/apm/xgene-v2/ethtool.c | 4
-rw-r--r-- drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c | 146
-rw-r--r-- drivers/net/ethernet/apm/xgene/xgene_enet_hw.c | 188
-rw-r--r-- drivers/net/ethernet/apm/xgene/xgene_enet_hw.h | 70
-rw-r--r-- drivers/net/ethernet/apm/xgene/xgene_enet_main.c | 74
-rw-r--r-- drivers/net/ethernet/apm/xgene/xgene_enet_main.h | 12
-rw-r--r-- drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c | 110
-rw-r--r-- drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c | 77
-rw-r--r-- drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h | 5
-rw-r--r-- drivers/net/ethernet/apple/bmac.c | 3
-rw-r--r-- drivers/net/ethernet/apple/macmace.c | 2
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/aq_ring.c | 2
-rw-r--r-- drivers/net/ethernet/atheros/atl1c/atl1c_main.c | 2
-rw-r--r-- drivers/net/ethernet/aurora/nb8800.c | 4
-rw-r--r-- drivers/net/ethernet/broadcom/b44.c | 4
-rw-r--r-- drivers/net/ethernet/broadcom/bcm63xx_enet.c | 8
-rw-r--r-- drivers/net/ethernet/broadcom/bcmsysport.c | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 4
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h | 4
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 3
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 174
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 42
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 3
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c | 19
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c | 3
-rw-r--r-- drivers/net/ethernet/broadcom/genet/bcmgenet.c | 4
-rw-r--r-- drivers/net/ethernet/broadcom/genet/bcmmii.c | 24
-rw-r--r-- drivers/net/ethernet/broadcom/tg3.c | 4
-rw-r--r-- drivers/net/ethernet/cadence/Kconfig | 10
-rw-r--r-- drivers/net/ethernet/cadence/Makefile | 5
-rw-r--r-- drivers/net/ethernet/cadence/macb.h | 159
-rw-r--r-- drivers/net/ethernet/cadence/macb_main.c (renamed from drivers/net/ethernet/cadence/macb.c) | 302
-rwxr-xr-x drivers/net/ethernet/cadence/macb_ptp.c | 518
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c | 10
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c | 17
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/cn66xx_device.c | 8
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/lio_core.c | 10
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/lio_ethtool.c | 19
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/lio_main.c | 60
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/lio_vf_main.c | 46
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/liquidio_common.h | 6
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/octeon_config.h | 13
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/octeon_console.c | 6
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/octeon_device.c | 103
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/octeon_device.h | 25
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/octeon_droq.c | 48
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/octeon_droq.h | 18
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/octeon_iq.h | 2
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c | 5
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/octeon_mailbox.h | 12
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c | 4
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.h | 2
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/octeon_network.h | 33
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/request_manager.c | 3
-rw-r--r-- drivers/net/ethernet/cavium/octeon/octeon_mgmt.c | 1
-rw-r--r-- drivers/net/ethernet/cavium/thunder/nicvf_main.c | 34
-rw-r--r-- drivers/net/ethernet/cavium/thunder/nicvf_queues.c | 8
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb/sge.c | 4
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c | 16
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c | 4
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb3/l2t.c | 2
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb3/sge.c | 2
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/Makefile | 2
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 24
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c | 12
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c | 53
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c | 5
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 288
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c | 475
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.h | 74
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h | 24
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/l2t.c | 2
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/sge.c | 173
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 267
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/t4_msg.h | 28
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h | 5
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/t4_regs.h | 6
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h | 56
-rw-r--r-- drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.h | 10
-rw-r--r-- drivers/net/ethernet/cirrus/cs89x0.c | 7
-rw-r--r-- drivers/net/ethernet/cirrus/ep93xx_eth.c | 5
-rw-r--r-- drivers/net/ethernet/cisco/enic/enic.h | 4
-rw-r--r-- drivers/net/ethernet/cisco/enic/enic_main.c | 14
-rw-r--r-- drivers/net/ethernet/davicom/dm9000.c | 2
-rw-r--r-- drivers/net/ethernet/dec/tulip/de2104x.c | 11
-rw-r--r-- drivers/net/ethernet/dec/tulip/de4x5.c | 6
-rw-r--r-- drivers/net/ethernet/dec/tulip/interrupt.c | 12
-rw-r--r-- drivers/net/ethernet/dec/tulip/uli526x.c | 6
-rw-r--r-- drivers/net/ethernet/dec/tulip/winbond-840.c | 5
-rw-r--r-- drivers/net/ethernet/dnet.c | 2
-rw-r--r-- drivers/net/ethernet/ec_bhf.c | 2
-rw-r--r-- drivers/net/ethernet/emulex/benet/be.h | 2
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_hw.h | 3
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_main.c | 27
-rw-r--r-- drivers/net/ethernet/faraday/ftmac100.c | 5
-rw-r--r-- drivers/net/ethernet/fealnx.c | 9
-rw-r--r-- drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 4
-rw-r--r-- drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c | 6
-rw-r--r-- drivers/net/ethernet/freescale/fec.h | 4
-rw-r--r-- drivers/net/ethernet/freescale/fec_main.c | 31
-rw-r--r-- drivers/net/ethernet/freescale/gianfar.c | 4
-rw-r--r-- drivers/net/ethernet/freescale/ucc_geth_ethtool.c | 4
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hnae.h | 1
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_ethtool.c | 107
-rw-r--r-- drivers/net/ethernet/hp/hp100.c | 2
-rw-r--r-- drivers/net/ethernet/i825xx/82596.c | 3
-rw-r--r-- drivers/net/ethernet/i825xx/lib82596.c | 3
-rw-r--r-- drivers/net/ethernet/ibm/emac/phy.c | 12
-rw-r--r-- drivers/net/ethernet/ibm/ibmveth.c | 109
-rw-r--r-- drivers/net/ethernet/ibm/ibmveth.h | 1
-rw-r--r-- drivers/net/ethernet/ibm/ibmvnic.c | 403
-rw-r--r-- drivers/net/ethernet/ibm/ibmvnic.h | 5
-rw-r--r-- drivers/net/ethernet/intel/Kconfig | 10
-rw-r--r-- drivers/net/ethernet/intel/e100.c | 5
-rw-r--r-- drivers/net/ethernet/intel/e1000/e1000_main.c | 2
-rw-r--r-- drivers/net/ethernet/intel/e1000e/e1000.h | 1
-rw-r--r-- drivers/net/ethernet/intel/e1000e/ethtool.c | 3
-rw-r--r-- drivers/net/ethernet/intel/e1000e/netdev.c | 51
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k_netdev.c | 4
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e.h | 76
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h | 4
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_client.c | 10
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_common.c | 8
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_dcb.c | 15
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 49
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_fcoe.c | 2
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_main.c | 530
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_prototype.h | 6
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_ptp.c | 49
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 272
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_txrx.h | 12
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_virtchnl.h | 449
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 474
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h | 9
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h | 5
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_common.c | 18
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_devids.h | 1
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_prototype.h | 6
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h | 449
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40evf.h | 26
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40evf_client.c | 18
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40evf_main.c | 84
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c | 286
-rw-r--r-- drivers/net/ethernet/intel/igb/e1000_82575.c | 1
-rw-r--r-- drivers/net/ethernet/intel/igb/igb.h | 4
-rw-r--r-- drivers/net/ethernet/intel/igb/igb_ethtool.c | 3
-rw-r--r-- drivers/net/ethernet/intel/igb/igb_main.c | 66
-rw-r--r-- drivers/net/ethernet/intel/igb/igb_ptp.c | 42
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe.h | 2
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c | 9
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_common.c | 44
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 9
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c | 2
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 105
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h | 5
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c | 43
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c | 63
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c | 61
-rw-r--r-- drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 28
-rw-r--r-- drivers/net/ethernet/intel/ixgbevf/vf.c | 2
-rw-r--r-- drivers/net/ethernet/jme.c | 47
-rw-r--r-- drivers/net/ethernet/korina.c | 5
-rw-r--r-- drivers/net/ethernet/marvell/mv643xx_eth.c | 5
-rw-r--r-- drivers/net/ethernet/marvell/mvmdio.c | 214
-rw-r--r-- drivers/net/ethernet/marvell/mvneta.c | 10
-rw-r--r-- drivers/net/ethernet/marvell/mvpp2.c | 107
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 99
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_eth_soc.h | 18
-rw-r--r-- drivers/net/ethernet/mellanox/Kconfig | 1
-rw-r--r-- drivers/net/ethernet/mellanox/Makefile | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/cmd.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_cq.c | 25
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | 19
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_main.c | 14
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 107
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_resources.c | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_rx.c | 145
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_selftest.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_tx.c | 311
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/main.c | 8
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/mlx4.h | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 35
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/Kconfig | 26
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/Makefile | 13
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c | 78
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h | 138
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/alloc.c | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 46
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/debugfs.c | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en.h | 61
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c | 461
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h | 140
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c | 378
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h | 55
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c | 133
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c | 29
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_clock.c | 11
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_common.c | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 322
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_fs.c | 25
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 225
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 12
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 178
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c | 10
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 329
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 36
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c | 19
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eq.c | 19
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 26
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 17
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.c | 238
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.h | 84
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c | 1042
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.h | 96
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c | 283
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h | 113
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c | 376
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h | 94
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.c | 164
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.h | 204
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c | 38
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 12
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fw.c | 296
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/health.c | 51
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c | 145
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/ipoib.c) | 80
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/ipoib.h) | 7
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lag.c | 71
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lib/gid.c | 154
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h | 43
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/main.c | 94
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h | 12
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/port.c | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/qp.c | 21
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/sriov.c | 15
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/srq.c | 14
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/transobj.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/vport.c | 38
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/wq.c | 46
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/wq.h | 27
-rw-r--r-- drivers/net/ethernet/mellanox/mlxfw/Kconfig | 13
-rw-r--r-- drivers/net/ethernet/mellanox/mlxfw/Makefile | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlxfw/mlxfw.h | 111
-rw-r--r-- drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c | 273
-rw-r--r-- drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c | 619
-rw-r--r-- drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.h | 66
-rw-r--r-- drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_file.h | 60
-rw-r--r-- drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_format.h | 103
-rw-r--r-- drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv.h | 98
-rw-r--r-- drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv_multi.c | 126
-rw-r--r-- drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv_multi.h | 71
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/Kconfig | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/Makefile | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/core.h | 12
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c | 40
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/pci_hw.h | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/reg.h | 297
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 1587
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 416
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c | 22
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c | 73
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c | 22
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c | 992
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c | 50
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 1027
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c | 1999
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/trap.h | 1
-rw-r--r-- drivers/net/ethernet/micrel/ks8842.c | 4
-rw-r--r-- drivers/net/ethernet/micrel/ks8851.c | 5
-rw-r--r-- drivers/net/ethernet/micrel/ks8851_mll.c | 5
-rw-r--r-- drivers/net/ethernet/micrel/ksz884x.c | 3
-rw-r--r-- drivers/net/ethernet/neterion/s2io.c | 5
-rw-r--r-- drivers/net/ethernet/neterion/vxge/vxge-main.c | 1
-rw-r--r-- drivers/net/ethernet/netronome/Kconfig | 11
-rw-r--r-- drivers/net/ethernet/netronome/nfp/Makefile | 26
-rw-r--r-- drivers/net/ethernet/netronome/nfp/bpf/jit.c (renamed from drivers/net/ethernet/netronome/nfp/nfp_bpf_jit.c) | 4
-rw-r--r-- drivers/net/ethernet/netronome/nfp/bpf/main.c | 160
-rw-r--r-- drivers/net/ethernet/netronome/nfp/bpf/main.h (renamed from drivers/net/ethernet/netronome/nfp/nfp_bpf.h) | 23
-rw-r--r-- drivers/net/ethernet/netronome/nfp/bpf/offload.c (renamed from drivers/net/ethernet/netronome/nfp/nfp_net_offload.c) | 61
-rw-r--r-- drivers/net/ethernet/netronome/nfp/bpf/verifier.c (renamed from drivers/net/ethernet/netronome/nfp/nfp_bpf_verifier.c) | 2
-rw-r--r-- drivers/net/ethernet/netronome/nfp/flower/action.c | 211
-rw-r--r-- drivers/net/ethernet/netronome/nfp/flower/cmsg.c | 157
-rw-r--r-- drivers/net/ethernet/netronome/nfp/flower/cmsg.h | 317
-rw-r--r-- drivers/net/ethernet/netronome/nfp/flower/main.c | 390
-rw-r--r-- drivers/net/ethernet/netronome/nfp/flower/main.h | 159
-rw-r--r-- drivers/net/ethernet/netronome/nfp/flower/match.c | 292
-rw-r--r-- drivers/net/ethernet/netronome/nfp/flower/metadata.c | 438
-rw-r--r-- drivers/net/ethernet/netronome/nfp/flower/offload.c | 400
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_app.c | 126
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_app.h | 314
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_app_nic.c | 75
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_asm.h | 2
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_devlink.c | 199
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_hwmon.c | 192
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_main.c | 150
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_main.h | 76
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_net.h | 148
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_net_common.c | 1245
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h | 50
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c | 15
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c | 79
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_net_main.c | 863
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_net_repr.c | 396
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_net_repr.h | 128
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c | 16
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_port.c | 233
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_port.h | 199
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h | 6
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c | 49
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h | 35
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c | 186
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c | 40
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfpcore/nfp_hwinfo.c | 70
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mip.c | 7
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c | 9
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.h | 20
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c | 24
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h | 17
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_cmds.c | 47
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c | 16
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c | 10
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c | 154
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nic/main.c | 58
-rw-r--r-- drivers/net/ethernet/nuvoton/w90p910_ether.c | 5
-rw-r--r-- drivers/net/ethernet/nxp/lpc_eth.c | 8
-rw-r--r-- drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c | 6
-rw-r--r-- drivers/net/ethernet/packetengines/hamachi.c | 4
-rw-r--r-- drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c | 3
-rw-r--r-- drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c | 15
-rw-r--r-- drivers/net/ethernet/qlogic/qed/Makefile | 2
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed.h | 56
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_cxt.c | 251
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_cxt.h | 54
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_dcbx.c | 66
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_dcbx.h | 2
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_debug.c | 3576
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_debug.h | 3
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_dev.c | 311
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_dev_api.h | 5
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_fcoe.c | 43
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_fcoe.h | 22
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_hsi.h | 3584
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c | 267
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_init_ops.c | 4
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_int.c | 2074
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_int.h | 93
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_iscsi.c | 126
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_iscsi.h | 23
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_iwarp.c | 2408
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_iwarp.h | 189
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_l2.c | 328
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_l2.h | 79
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_ll2.c | 934
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_ll2.h | 126
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_main.c | 88
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_mcp.c | 189
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_mcp.h | 35
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_ooo.c | 30
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_ooo.h | 26
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_ptp.c | 4
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_rdma.c | 1787
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_rdma.h | 206
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_reg_addr.h | 196
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_roce.c | 1960
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_roce.h | 199
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_sp.h | 69
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_sp_commands.c | 61
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_spq.c | 116
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_sriov.c | 559
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_sriov.h | 43
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_vf.c | 271
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_vf.h | 102
-rw-r--r-- drivers/net/ethernet/qlogic/qede/Makefile | 2
-rw-r--r-- drivers/net/ethernet/qlogic/qede/qede.h | 6
-rw-r--r-- drivers/net/ethernet/qlogic/qede/qede_dcbnl.c | 1
-rw-r--r-- drivers/net/ethernet/qlogic/qede/qede_ethtool.c | 24
-rw-r--r-- drivers/net/ethernet/qlogic/qede/qede_filter.c | 63
-rw-r--r-- drivers/net/ethernet/qlogic/qede/qede_fp.c | 46
-rw-r--r-- drivers/net/ethernet/qlogic/qede/qede_main.c | 130
-rw-r--r-- drivers/net/ethernet/qlogic/qede/qede_ptp.c | 1
-rw-r--r-- drivers/net/ethernet/qlogic/qede/qede_rdma.c (renamed from drivers/net/ethernet/qlogic/qede/qede_roce.c) | 144
-rw-r--r-- drivers/net/ethernet/qlogic/qlge/qlge_main.c | 7
-rw-r--r-- drivers/net/ethernet/qualcomm/Kconfig | 24
-rw-r--r-- drivers/net/ethernet/qualcomm/Makefile | 7
-rw-r--r-- drivers/net/ethernet/qualcomm/emac/emac-sgmii.c | 23
-rw-r--r-- drivers/net/ethernet/qualcomm/emac/emac.c | 16
-rw-r--r-- drivers/net/ethernet/qualcomm/qca_7k.c | 30
-rw-r--r-- drivers/net/ethernet/qualcomm/qca_7k.h | 15
-rw-r--r-- drivers/net/ethernet/qualcomm/qca_7k_common.c (renamed from drivers/net/ethernet/qualcomm/qca_framing.c) | 26
-rw-r--r-- drivers/net/ethernet/qualcomm/qca_7k_common.h (renamed from drivers/net/ethernet/qualcomm/qca_framing.h) | 24
-rw-r--r-- drivers/net/ethernet/qualcomm/qca_debug.c | 5
-rw-r--r-- drivers/net/ethernet/qualcomm/qca_spi.c | 51
-rw-r--r-- drivers/net/ethernet/qualcomm/qca_spi.h | 5
-rw-r--r-- drivers/net/ethernet/qualcomm/qca_uart.c | 423
-rw-r--r-- drivers/net/ethernet/realtek/8139cp.c | 5
-rw-r--r-- drivers/net/ethernet/realtek/r8169.c | 4
-rw-r--r-- drivers/net/ethernet/renesas/ravb_main.c | 14
-rw-r--r-- drivers/net/ethernet/renesas/sh_eth.c | 25
-rw-r--r-- drivers/net/ethernet/rocker/rocker.h | 21
-rw-r--r-- drivers/net/ethernet/rocker/rocker_main.c | 235
-rw-r--r-- drivers/net/ethernet/rocker/rocker_ofdpa.c | 610
-rw-r--r-- drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c | 3
-rw-r--r-- drivers/net/ethernet/sfc/ef10.c | 1
-rw-r--r-- drivers/net/ethernet/sfc/efx.h | 4
-rw-r--r-- drivers/net/ethernet/sfc/falcon/efx.h | 4
-rw-r--r-- drivers/net/ethernet/sfc/falcon/selftest.c | 3
-rw-r--r-- drivers/net/ethernet/sfc/falcon/tx.c | 4
-rw-r--r-- drivers/net/ethernet/sfc/mcdi.c | 7
-rw-r--r-- drivers/net/ethernet/sfc/selftest.c | 3
-rw-r--r-- drivers/net/ethernet/sfc/tx.c | 4
-rw-r--r-- drivers/net/ethernet/sgi/ioc3-eth.c | 5
-rw-r--r-- drivers/net/ethernet/silan/sc92031.c | 10
-rw-r--r-- drivers/net/ethernet/sis/sis190.c | 4
-rw-r--r-- drivers/net/ethernet/smsc/epic100.c | 5
-rw-r--r-- drivers/net/ethernet/smsc/smc911x.c | 7
-rw-r--r-- drivers/net/ethernet/smsc/smc91c92_cs.c | 13
-rw-r--r-- drivers/net/ethernet/smsc/smc91x.c | 9
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/Kconfig | 11
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/Makefile | 1
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/common.h | 8
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c | 1011
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c | 26
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c | 4
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c | 6
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c | 26
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c | 16
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c | 3
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac.h | 2
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c | 7
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 77
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c | 203
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 10
-rw-r--r-- drivers/net/ethernet/sun/ldmvsw.c | 2
-rw-r--r-- drivers/net/ethernet/sun/niu.c | 2
-rw-r--r-- drivers/net/ethernet/sun/sunvnet.c | 2
-rw-r--r-- drivers/net/ethernet/ti/cpsw.c | 25
-rw-r--r-- drivers/net/ethernet/ti/cpts.h | 16
-rw-r--r-- drivers/net/ethernet/ti/davinci_cpdma.c | 5
-rw-r--r-- drivers/net/ethernet/ti/netcp_core.c | 4
-rw-r--r-- drivers/net/ethernet/ti/netcp_ethss.c | 26
-rw-r--r-- drivers/net/ethernet/tile/tilegx.c | 1
-rw-r--r-- drivers/net/ethernet/toshiba/ps3_gelic_net.c | 2
-rw-r--r-- drivers/net/ethernet/tundra/tsi108_eth.c | 5
-rw-r--r-- drivers/net/ethernet/via/via-rhine.c | 5
484 files changed, 40908 insertions(+), 17033 deletions(-)
diff --git a/drivers/net/ethernet/3com/3c509.c b/drivers/net/ethernet/3com/3c509.c
index db8592d412ab..f66c9710cb81 100644
--- a/drivers/net/ethernet/3com/3c509.c
+++ b/drivers/net/ethernet/3com/3c509.c
@@ -1039,7 +1039,7 @@ el3_link_ok(struct net_device *dev)
return tmp & (1<<11);
}
-static int
+static void
el3_netdev_get_ecmd(struct net_device *dev, struct ethtool_link_ksettings *cmd)
{
u16 tmp;
@@ -1082,7 +1082,6 @@ el3_netdev_get_ecmd(struct net_device *dev, struct ethtool_link_ksettings *cmd)
supported);
cmd->base.speed = SPEED_10;
EL3WINDOW(1);
- return 0;
}
static int
@@ -1151,12 +1150,11 @@ static int el3_get_link_ksettings(struct net_device *dev,
struct ethtool_link_ksettings *cmd)
{
struct el3_private *lp = netdev_priv(dev);
- int ret;
spin_lock_irq(&lp->lock);
- ret = el3_netdev_get_ecmd(dev, cmd);
+ el3_netdev_get_ecmd(dev, cmd);
spin_unlock_irq(&lp->lock);
- return ret;
+ return 0;
}
static int el3_set_link_ksettings(struct net_device *dev,
diff --git a/drivers/net/ethernet/3com/3c515.c b/drivers/net/ethernet/3com/3c515.c
index e7b1fa56b290..c5987f518cb2 100644
--- a/drivers/net/ethernet/3com/3c515.c
+++ b/drivers/net/ethernet/3com/3c515.c
@@ -1370,9 +1370,9 @@ static int boomerang_rx(struct net_device *dev)
(skb = netdev_alloc_skb(dev, pkt_len + 4)) != NULL) {
skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
/* 'skb_put()' points to the start of sk_buff data area. */
- memcpy(skb_put(skb, pkt_len),
- isa_bus_to_virt(vp->rx_ring[entry].
- addr), pkt_len);
+ skb_put_data(skb,
+ isa_bus_to_virt(vp->rx_ring[entry].addr),
+ pkt_len);
rx_copy++;
} else {
void *temp;
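The hunk above, and several that follow (3c59x, greth, et131x), replace the open-coded memcpy()-into-skb_put() pair with skb_put_data(). A minimal sketch of the equivalence, with placeholder buffer names:

	/* before: reserve len bytes in the skb, then fill them by hand */
	memcpy(skb_put(skb, len), src, len);

	/* after: skb_put_data() does the reserve and the copy in one call,
	 * keeping the length accounting in one place */
	skb_put_data(skb, src, len);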
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index e41245a54f8b..3b516ebeeddb 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -2628,9 +2628,8 @@ boomerang_rx(struct net_device *dev)
skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
pci_dma_sync_single_for_cpu(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
/* 'skb_put()' points to the start of sk_buff data area. */
- memcpy(skb_put(skb, pkt_len),
- vp->rx_skbuff[entry]->data,
- pkt_len);
+ skb_put_data(skb, vp->rx_skbuff[entry]->data,
+ pkt_len);
pci_dma_sync_single_for_device(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
vp->rx_copy++;
} else {
@@ -2912,7 +2911,9 @@ static int vortex_get_link_ksettings(struct net_device *dev,
{
struct vortex_private *vp = netdev_priv(dev);
- return mii_ethtool_get_link_ksettings(&vp->mii, cmd);
+ mii_ethtool_get_link_ksettings(&vp->mii, cmd);
+
+ return 0;
}
static int vortex_set_link_ksettings(struct net_device *dev,
diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
index db02bc2fb4b2..05d9d3e2e92e 100644
--- a/drivers/net/ethernet/8390/ax88796.c
+++ b/drivers/net/ethernet/8390/ax88796.c
@@ -723,6 +723,12 @@ static int ax_init_dev(struct net_device *dev)
ax->plat->mac_addr)
memcpy(dev->dev_addr, ax->plat->mac_addr, ETH_ALEN);
+ if (!is_valid_ether_addr(dev->dev_addr)) {
+ eth_hw_addr_random(dev);
+ dev_info(&dev->dev, "Using random MAC address: %pM\n",
+ dev->dev_addr);
+ }
+
ax_reset_8390(dev);
ei_local->name = "AX88796";
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
index d8e133ced7b8..4309be3724ad 100644
--- a/drivers/net/ethernet/aeroflex/greth.c
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -807,7 +807,8 @@ static int greth_rx(struct net_device *dev, int limit)
if (netif_msg_pktdata(greth))
greth_print_rx_packet(phys_to_virt(dma_addr), pkt_len);
- memcpy(skb_put(skb, pkt_len), phys_to_virt(dma_addr), pkt_len);
+ skb_put_data(skb, phys_to_virt(dma_addr),
+ pkt_len);
skb->protocol = eth_type_trans(skb, dev);
dev->stats.rx_bytes += pkt_len;
diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c
index 87a11b9f0ea5..54eff90e2f02 100644
--- a/drivers/net/ethernet/agere/et131x.c
+++ b/drivers/net/ethernet/agere/et131x.c
@@ -2282,7 +2282,7 @@ static struct rfd *nic_rx_pkts(struct et131x_adapter *adapter)
adapter->netdev->stats.rx_bytes += rfd->len;
- memcpy(skb_put(skb, rfd->len), fbr->virt[buff_index], rfd->len);
+ skb_put_data(skb, fbr->virt[buff_index], rfd->len);
skb->protocol = eth_type_trans(skb, adapter->netdev);
skb->ip_summed = CHECKSUM_NONE;
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
index c8f4d26fc9d4..3143de45baaa 100644
--- a/drivers/net/ethernet/allwinner/sun4i-emac.c
+++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
@@ -633,7 +633,7 @@ static void emac_rx(struct net_device *dev)
if (!skb)
continue;
skb_reserve(skb, 2);
- rdptr = (u8 *) skb_put(skb, rxlen - 4);
+ rdptr = skb_put(skb, rxlen - 4);
/* Read received packet from RX SRAM */
if (netif_msg_rx_status(db))
diff --git a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
index 5b6509d59716..305dc1996b4e 100644
--- a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
+++ b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
@@ -70,6 +70,8 @@ enum ena_admin_aq_feature_id {
ENA_ADMIN_MAX_QUEUES_NUM = 2,
+ ENA_ADMIN_HW_HINTS = 3,
+
ENA_ADMIN_RSS_HASH_FUNCTION = 10,
ENA_ADMIN_STATELESS_OFFLOAD_CONFIG = 11,
@@ -749,6 +751,31 @@ struct ena_admin_feature_rss_ind_table {
struct ena_admin_rss_ind_table_entry inline_entry;
};
+/* When hint value is 0, driver should use its own predefined value */
+struct ena_admin_ena_hw_hints {
+ /* value in ms */
+ u16 mmio_read_timeout;
+
+ /* value in ms */
+ u16 driver_watchdog_timeout;
+
+ /* Per packet tx completion timeout. value in ms */
+ u16 missing_tx_completion_timeout;
+
+ u16 missed_tx_completion_count_threshold_to_reset;
+
+ /* value in ms */
+ u16 admin_completion_tx_timeout;
+
+ u16 netdev_wd_timeout;
+
+ u16 max_tx_sgl_size;
+
+ u16 max_rx_sgl_size;
+
+ u16 reserved[8];
+};
+
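Per the comment above, a hint of zero means the driver keeps its own default. A consumer of this struct would presumably apply each field along these lines (sketch; hint_or_default() and the DEF_* constants are hypothetical, not part of this patch):

	static u16 hint_or_default(u16 hint, u16 driver_default)
	{
		/* 0 means the device offered no hint */
		return hint ? hint : driver_default;
	}

	/* e.g. mmio_read_timeout and driver_watchdog_timeout are in ms */
	mmio_to_ms = hint_or_default(hints->mmio_read_timeout, DEF_MMIO_TO_MS);
	wd_to_ms = hint_or_default(hints->driver_watchdog_timeout, DEF_WD_TO_MS);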
struct ena_admin_get_feat_cmd {
struct ena_admin_aq_common_desc aq_common_descriptor;
@@ -782,6 +809,8 @@ struct ena_admin_get_feat_resp {
struct ena_admin_feature_rss_ind_table ind_table;
struct ena_admin_feature_intr_moder_desc intr_moderation;
+
+ struct ena_admin_ena_hw_hints hw_hints;
} u;
};
@@ -857,6 +886,8 @@ enum ena_admin_aenq_notification_syndrom {
ENA_ADMIN_SUSPEND = 0,
ENA_ADMIN_RESUME = 1,
+
+ ENA_ADMIN_UPDATE_HINTS = 2,
};
struct ena_admin_aenq_entry {
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
index f5b237e0bd60..52beba8c7a39 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_com.c
@@ -99,8 +99,8 @@ static inline int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
return -EINVAL;
}
- ena_addr->mem_addr_low = (u32)addr;
- ena_addr->mem_addr_high = (u64)addr >> 32;
+ ena_addr->mem_addr_low = lower_32_bits(addr);
+ ena_addr->mem_addr_high = (u16)upper_32_bits(addr);
return 0;
}
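lower_32_bits() and upper_32_bits() are the standard <linux/kernel.h> helpers for splitting a 64-bit address; spelled out, the hunk above is equivalent to this sketch:

	ena_addr->mem_addr_low = (u32)(addr & 0xffffffff);	/* lower_32_bits(addr) */
	ena_addr->mem_addr_high = (u16)(addr >> 32);		/* (u16)upper_32_bits(addr);
								 * the high-address field is
								 * only 16 bits wide here */

The named helpers document the intent and avoid the width pitfalls of hand-written shifts.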
@@ -329,7 +329,7 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
size_t size;
int dev_node = 0;
- memset(&io_sq->desc_addr, 0x0, sizeof(struct ena_com_io_desc_addr));
+ memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr));
io_sq->desc_entry_size =
(io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
@@ -383,7 +383,7 @@ static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
size_t size;
int prev_node = 0;
- memset(&io_cq->cdesc_addr, 0x0, sizeof(struct ena_com_io_desc_addr));
+ memset(&io_cq->cdesc_addr, 0x0, sizeof(io_cq->cdesc_addr));
/* Use the basic completion descriptor for Rx */
io_cq->cdesc_entry_size_in_bytes =
@@ -494,7 +494,7 @@ static int ena_com_comp_status_to_errno(u8 comp_status)
case ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE:
return -ENOMEM;
case ENA_ADMIN_UNSUPPORTED_OPCODE:
- return -EPERM;
+ return -EOPNOTSUPP;
case ENA_ADMIN_BAD_OPCODE:
case ENA_ADMIN_MALFORMED_REQUEST:
case ENA_ADMIN_ILLEGAL_PARAMETER:
@@ -511,7 +511,7 @@ static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_c
unsigned long flags, timeout;
int ret;
- timeout = jiffies + ADMIN_CMD_TIMEOUT_US;
+ timeout = jiffies + usecs_to_jiffies(admin_queue->completion_timeout);
while (1) {
spin_lock_irqsave(&admin_queue->q_lock, flags);
@@ -561,7 +561,8 @@ static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *com
int ret;
wait_for_completion_timeout(&comp_ctx->wait_event,
- usecs_to_jiffies(ADMIN_CMD_TIMEOUT_US));
+ usecs_to_jiffies(
+ admin_queue->completion_timeout));
/* In case the command wasn't completed find out the root cause.
* There might be 2 kinds of errors
@@ -601,12 +602,15 @@ static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
volatile struct ena_admin_ena_mmio_req_read_less_resp *read_resp =
mmio_read->read_resp;
- u32 mmio_read_reg, ret;
+ u32 mmio_read_reg, ret, i;
unsigned long flags;
- int i;
+ u32 timeout = mmio_read->reg_read_to;
might_sleep();
+ if (timeout == 0)
+ timeout = ENA_REG_READ_TIMEOUT;
+
/* If readless is disabled, perform regular read */
if (!mmio_read->readless_supported)
return readl(ena_dev->reg_bar + offset);
@@ -627,14 +631,14 @@ static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
writel(mmio_read_reg, ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);
- for (i = 0; i < ENA_REG_READ_TIMEOUT; i++) {
+ for (i = 0; i < timeout; i++) {
if (read_resp->req_id == mmio_read->seq_num)
break;
udelay(1);
}
- if (unlikely(i == ENA_REG_READ_TIMEOUT)) {
+ if (unlikely(i == timeout)) {
pr_err("reading reg failed for timeout. expected: req id[%hu] offset[%hu] actual: req id[%hu] offset[%hu]\n",
mmio_read->seq_num, offset, read_resp->req_id,
read_resp->reg_off);
@@ -681,7 +685,7 @@ static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev,
u8 direction;
int ret;
- memset(&destroy_cmd, 0x0, sizeof(struct ena_admin_aq_destroy_sq_cmd));
+ memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));
if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
direction = ENA_ADMIN_SQ_DIRECTION_TX;
@@ -786,7 +790,7 @@ static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) {
pr_debug("Feature %d isn't supported\n", feature_id);
- return -EPERM;
+ return -EOPNOTSUPP;
}
memset(&get_cmd, 0x0, sizeof(get_cmd));
@@ -963,7 +967,7 @@ static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
u8 direction;
int ret;
- memset(&create_cmd, 0x0, sizeof(struct ena_admin_aq_create_sq_cmd));
+ memset(&create_cmd, 0x0, sizeof(create_cmd));
create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_SQ;
@@ -1155,7 +1159,7 @@ int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
struct ena_admin_acq_create_cq_resp_desc cmd_completion;
int ret;
- memset(&create_cmd, 0x0, sizeof(struct ena_admin_aq_create_cq_cmd));
+ memset(&create_cmd, 0x0, sizeof(create_cmd));
create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_CQ;
@@ -1263,7 +1267,7 @@ int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev,
struct ena_admin_acq_destroy_cq_resp_desc destroy_resp;
int ret;
- memset(&destroy_cmd, 0x0, sizeof(struct ena_admin_aq_destroy_sq_cmd));
+ memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));
destroy_cmd.cq_idx = io_cq->idx;
destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_CQ;
@@ -1324,7 +1328,7 @@ int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
if ((get_resp.u.aenq.supported_groups & groups_flag) != groups_flag) {
pr_warn("Trying to set unsupported aenq events. supported flag: %x asked flag: %x\n",
get_resp.u.aenq.supported_groups, groups_flag);
- return -EPERM;
+ return -EOPNOTSUPP;
}
memset(&cmd, 0x0, sizeof(cmd));
@@ -1619,8 +1623,8 @@ int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
io_sq = &ena_dev->io_sq_queues[ctx->qid];
io_cq = &ena_dev->io_cq_queues[ctx->qid];
- memset(io_sq, 0x0, sizeof(struct ena_com_io_sq));
- memset(io_cq, 0x0, sizeof(struct ena_com_io_cq));
+ memset(io_sq, 0x0, sizeof(*io_sq));
+ memset(io_cq, 0x0, sizeof(*io_cq));
/* Init CQ */
io_cq->q_depth = ctx->queue_size;
@@ -1730,6 +1734,20 @@ int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
memcpy(&get_feat_ctx->offload, &get_resp.u.offload,
sizeof(get_resp.u.offload));
+ /* Driver hints isn't a mandatory admin command. So in case the
+ * command isn't supported set driver hints to 0
+ */
+ rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_HW_HINTS);
+
+ if (!rc)
+ memcpy(&get_feat_ctx->hw_hints, &get_resp.u.hw_hints,
+ sizeof(get_resp.u.hw_hints));
+ else if (rc == -EOPNOTSUPP)
+ memset(&get_feat_ctx->hw_hints, 0x0,
+ sizeof(get_feat_ctx->hw_hints));
+ else
+ return rc;
+
return 0;
}
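This is the recurring pattern behind the -EPERM to -EOPNOTSUPP conversion throughout this patch: -EOPNOTSUPP means the device simply does not implement the feature, so the driver falls back to defaults, while any other error aborts. Condensed (sketch; ctx abbreviates the get_feat_ctx argument):

	rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_HW_HINTS);
	if (!rc)				/* device provided hints */
		memcpy(&ctx->hw_hints, &get_resp.u.hw_hints,
		       sizeof(ctx->hw_hints));
	else if (rc == -EOPNOTSUPP)		/* optional feature missing: defaults */
		memset(&ctx->hw_hints, 0, sizeof(ctx->hw_hints));
	else
		return rc;			/* genuine admin-queue failure */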
@@ -1807,7 +1825,8 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
writel((u32)aenq->head, dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
}
-int ena_com_dev_reset(struct ena_com_dev *ena_dev)
+int ena_com_dev_reset(struct ena_com_dev *ena_dev,
+ enum ena_regs_reset_reason_types reset_reason)
{
u32 stat, timeout, cap, reset_val;
int rc;
@@ -1835,6 +1854,8 @@ int ena_com_dev_reset(struct ena_com_dev *ena_dev)
/* start reset */
reset_val = ENA_REGS_DEV_CTL_DEV_RESET_MASK;
+ reset_val |= (reset_reason << ENA_REGS_DEV_CTL_RESET_REASON_SHIFT) &
+ ENA_REGS_DEV_CTL_RESET_REASON_MASK;
writel(reset_val, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
/* Write again the MMIO read request address */
@@ -1855,6 +1876,14 @@ int ena_com_dev_reset(struct ena_com_dev *ena_dev)
return rc;
}
+ timeout = (cap & ENA_REGS_CAPS_ADMIN_CMD_TO_MASK) >>
+ ENA_REGS_CAPS_ADMIN_CMD_TO_SHIFT;
+ if (timeout)
+ /* the resolution of timeout reg is 100ms */
+ ena_dev->admin_queue.completion_timeout = timeout * 100000;
+ else
+ ena_dev->admin_queue.completion_timeout = ADMIN_CMD_TIMEOUT_US;
+
return 0;
}
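Two details in this hunk are easy to miss: the reset reason is packed into its DEV_CTL field with the usual shift-and-mask idiom, and the CAPS admin-timeout field is in units of 100 ms while completion_timeout is kept in microseconds, hence the factor of 100000 (100 ms = 100,000 us). In isolation (sketch):

	/* field insert: (value << SHIFT) & MASK keeps stray bits out of
	 * neighbouring fields */
	reset_val |= (reset_reason << ENA_REGS_DEV_CTL_RESET_REASON_SHIFT) &
		     ENA_REGS_DEV_CTL_RESET_REASON_MASK;

	/* CAPS timeout field is in 100 ms units; the queue stores microseconds */
	ena_dev->admin_queue.completion_timeout = timeout * 100000;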
@@ -1909,7 +1938,7 @@ int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu)
if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) {
pr_debug("Feature %d isn't supported\n", ENA_ADMIN_MTU);
- return -EPERM;
+ return -EOPNOTSUPP;
}
memset(&cmd, 0x0, sizeof(cmd));
@@ -1963,7 +1992,7 @@ int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
ENA_ADMIN_RSS_HASH_FUNCTION)) {
pr_debug("Feature %d isn't supported\n",
ENA_ADMIN_RSS_HASH_FUNCTION);
- return -EPERM;
+ return -EOPNOTSUPP;
}
/* Validate hash function is supported */
@@ -1975,7 +2004,7 @@ int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
if (get_resp.u.flow_hash_func.supported_func & (1 << rss->hash_func)) {
pr_err("Func hash %d isn't supported by device, abort\n",
rss->hash_func);
- return -EPERM;
+ return -EOPNOTSUPP;
}
memset(&cmd, 0x0, sizeof(cmd));
@@ -2034,7 +2063,7 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
if (!((1 << func) & get_resp.u.flow_hash_func.supported_func)) {
pr_err("Flow hash function %d isn't supported\n", func);
- return -EPERM;
+ return -EOPNOTSUPP;
}
switch (func) {
@@ -2127,7 +2156,7 @@ int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
ENA_ADMIN_RSS_HASH_INPUT)) {
pr_debug("Feature %d isn't supported\n",
ENA_ADMIN_RSS_HASH_INPUT);
- return -EPERM;
+ return -EOPNOTSUPP;
}
memset(&cmd, 0x0, sizeof(cmd));
@@ -2208,7 +2237,7 @@ int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev)
pr_err("hash control doesn't support all the desire configuration. proto %x supported %x selected %x\n",
i, hash_ctrl->supported_fields[i].fields,
hash_ctrl->selected_fields[i].fields);
- return -EPERM;
+ return -EOPNOTSUPP;
}
}
@@ -2286,7 +2315,7 @@ int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
ena_dev, ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG)) {
pr_debug("Feature %d isn't supported\n",
ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
- return -EPERM;
+ return -EOPNOTSUPP;
}
ret = ena_com_ind_tbl_convert_to_device(ena_dev);
@@ -2553,7 +2582,7 @@ int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev)
ENA_ADMIN_INTERRUPT_MODERATION);
if (rc) {
- if (rc == -EPERM) {
+ if (rc == -EOPNOTSUPP) {
pr_debug("Feature %d isn't supported\n",
ENA_ADMIN_INTERRUPT_MODERATION);
rc = 0;
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.h b/drivers/net/ethernet/amazon/ena/ena_com.h
index c9b33ee5f258..7b784f8a06a6 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.h
+++ b/drivers/net/ethernet/amazon/ena/ena_com.h
@@ -97,6 +97,8 @@
#define ENA_INTR_MODER_LEVEL_STRIDE 2
#define ENA_INTR_BYTE_COUNT_NOT_SUPPORTED 0xFFFFFF
+#define ENA_HW_HINTS_NO_TIMEOUT 0xFFFF
+
enum ena_intr_moder_level {
ENA_INTR_MODER_LOWEST = 0,
ENA_INTR_MODER_LOW,
@@ -232,7 +234,9 @@ struct ena_com_stats_admin {
struct ena_com_admin_queue {
void *q_dmadev;
spinlock_t q_lock; /* spinlock for the admin queue */
+
struct ena_comp_ctx *comp_ctx;
+ u32 completion_timeout;
u16 q_depth;
struct ena_com_admin_cq cq;
struct ena_com_admin_sq sq;
@@ -267,6 +271,7 @@ struct ena_com_aenq {
struct ena_com_mmio_read {
struct ena_admin_ena_mmio_req_read_less_resp *read_resp;
dma_addr_t read_resp_dma_addr;
+ u32 reg_read_to; /* in us */
u16 seq_num;
bool readless_supported;
/* spin lock to ensure a single outstanding read */
@@ -336,6 +341,7 @@ struct ena_com_dev_get_features_ctx {
struct ena_admin_device_attr_feature_desc dev_attr;
struct ena_admin_feature_aenq_desc aenq;
struct ena_admin_feature_offload_desc offload;
+ struct ena_admin_ena_hw_hints hw_hints;
};
struct ena_com_create_io_ctx {
@@ -414,10 +420,12 @@ void ena_com_admin_destroy(struct ena_com_dev *ena_dev);
/* ena_com_dev_reset - Perform device FLR to the device.
* @ena_dev: ENA communication layer struct
+ * @reset_reason: Specify what is the trigger for the reset in case of an error.
*
* @return - 0 on success, negative value on failure.
*/
-int ena_com_dev_reset(struct ena_com_dev *ena_dev);
+int ena_com_dev_reset(struct ena_com_dev *ena_dev,
+ enum ena_regs_reset_reason_types reset_reason);
/* ena_com_create_io_queue - Create io queue.
* @ena_dev: ENA communication layer struct
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.c b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
index f999305e1363..b11e573ad57a 100644
--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
@@ -493,6 +493,11 @@ int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, u16 *req_id)
if (cdesc_phase != expected_phase)
return -EAGAIN;
+ if (unlikely(cdesc->req_id >= io_cq->q_depth)) {
+ pr_err("Invalid req id %d\n", cdesc->req_id);
+ return -EINVAL;
+ }
+
ena_com_cq_inc_head(io_cq);
*req_id = READ_ONCE(cdesc->req_id);
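The added check is the standard defensive pattern for any index read back from device-writable memory: validate it against the ring depth before it is ever used to index a host-side array. In general form (sketch, with hypothetical names):

	idx = READ_ONCE(cdesc->req_id);	/* completion memory is device-written */
	if (unlikely(idx >= q_depth))
		return -EINVAL;		/* malformed completion; never index
					 * host arrays with an unchecked value */
	info = &entries[idx];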
diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
index 3ee55e2fd694..b1212debc2e1 100644
--- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
@@ -93,6 +93,7 @@ static const struct ena_stats ena_stats_rx_strings[] = {
ENA_STAT_RX_ENTRY(dma_mapping_err),
ENA_STAT_RX_ENTRY(bad_desc_num),
ENA_STAT_RX_ENTRY(rx_copybreak_pkt),
+ ENA_STAT_RX_ENTRY(bad_req_id),
ENA_STAT_RX_ENTRY(empty_rx_ring),
};
@@ -539,12 +540,8 @@ static int ena_get_rss_hash(struct ena_com_dev *ena_dev,
}
rc = ena_com_get_hash_ctrl(ena_dev, proto, &hash_fields);
- if (rc) {
- /* If device don't have permission, return unsupported */
- if (rc == -EPERM)
- rc = -EOPNOTSUPP;
+ if (rc)
return rc;
- }
cmd->data = ena_flow_hash_to_flow_type(hash_fields);
@@ -612,7 +609,7 @@ static int ena_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info)
rc = -EOPNOTSUPP;
}
- return (rc == -EPERM) ? -EOPNOTSUPP : rc;
+ return rc;
}
static int ena_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info,
@@ -638,7 +635,7 @@ static int ena_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info,
rc = -EOPNOTSUPP;
}
- return (rc == -EPERM) ? -EOPNOTSUPP : rc;
+ return rc;
}
static u32 ena_get_rxfh_indir_size(struct net_device *netdev)
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 4f16ed38bcf3..f7dc22f65d9f 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -87,6 +87,7 @@ static void ena_tx_timeout(struct net_device *dev)
if (test_and_set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
return;
+ adapter->reset_reason = ENA_REGS_RESET_OS_NETDEV_WD;
u64_stats_update_begin(&adapter->syncp);
adapter->dev_stats.tx_timeout++;
u64_stats_update_end(&adapter->syncp);
@@ -303,6 +304,24 @@ static void ena_free_all_io_tx_resources(struct ena_adapter *adapter)
ena_free_tx_resources(adapter, i);
}
+static inline int validate_rx_req_id(struct ena_ring *rx_ring, u16 req_id)
+{
+ if (likely(req_id < rx_ring->ring_size))
+ return 0;
+
+ netif_err(rx_ring->adapter, rx_err, rx_ring->netdev,
+ "Invalid rx req_id: %hu\n", req_id);
+
+ u64_stats_update_begin(&rx_ring->syncp);
+ rx_ring->rx_stats.bad_req_id++;
+ u64_stats_update_end(&rx_ring->syncp);
+
+ /* Trigger device reset */
+ rx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID;
+ set_bit(ENA_FLAG_TRIGGER_RESET, &rx_ring->adapter->flags);
+ return -EFAULT;
+}
+
/* ena_setup_rx_resources - allocate I/O Rx resources (Descriptors)
* @adapter: network interface device structure
* @qid: queue index
@@ -314,7 +333,7 @@ static int ena_setup_rx_resources(struct ena_adapter *adapter,
{
struct ena_ring *rx_ring = &adapter->rx_ring[qid];
struct ena_irq *ena_irq = &adapter->irq_tbl[ENA_IO_IRQ_IDX(qid)];
- int size, node;
+ int size, node, i;
if (rx_ring->rx_buffer_info) {
netif_err(adapter, ifup, adapter->netdev,
@@ -335,6 +354,20 @@ static int ena_setup_rx_resources(struct ena_adapter *adapter,
return -ENOMEM;
}
+ size = sizeof(u16) * rx_ring->ring_size;
+ rx_ring->free_rx_ids = vzalloc_node(size, node);
+ if (!rx_ring->free_rx_ids) {
+ rx_ring->free_rx_ids = vzalloc(size);
+ if (!rx_ring->free_rx_ids) {
+ vfree(rx_ring->rx_buffer_info);
+ return -ENOMEM;
+ }
+ }
+
+ /* Req id ring for receiving RX pkts out of order */
+ for (i = 0; i < rx_ring->ring_size; i++)
+ rx_ring->free_rx_ids[i] = i;
+
/* Reset rx statistics */
memset(&rx_ring->rx_stats, 0x0, sizeof(rx_ring->rx_stats));
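free_rx_ids turns the RX ring into an id-indexed buffer pool so completions may arrive out of order: refill hands the device the id stored at next_to_use, and the cleanup path recycles whatever id the device reports back at next_to_clean. Reduced to its essentials (sketch; post_rx_buf() is a hypothetical stand-in, and ENA ring sizes are powers of two, so masking works the way the ENA_RX_RING_IDX_* macros do):

	/* refill: take the next free id and post its buffer */
	req_id = ring->free_rx_ids[ring->next_to_use];
	post_rx_buf(ring, &ring->rx_buffer_info[req_id], req_id);
	ring->next_to_use = (ring->next_to_use + 1) & (ring->ring_size - 1);

	/* completion: the device names the finished buffer by req_id, in
	 * any order; return that id to the free list at the clean pointer */
	req_id = completion->req_id;
	ring->free_rx_ids[ring->next_to_clean] = req_id;
	ring->next_to_clean = (ring->next_to_clean + 1) & (ring->ring_size - 1);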
@@ -358,6 +391,9 @@ static void ena_free_rx_resources(struct ena_adapter *adapter,
vfree(rx_ring->rx_buffer_info);
rx_ring->rx_buffer_info = NULL;
+
+ vfree(rx_ring->free_rx_ids);
+ rx_ring->free_rx_ids = NULL;
}
/* ena_setup_all_rx_resources - allocate I/O Rx queues resources for all queues
@@ -463,15 +499,22 @@ static void ena_free_rx_page(struct ena_ring *rx_ring,
static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num)
{
- u16 next_to_use;
+ u16 next_to_use, req_id;
u32 i;
int rc;
next_to_use = rx_ring->next_to_use;
for (i = 0; i < num; i++) {
- struct ena_rx_buffer *rx_info =
- &rx_ring->rx_buffer_info[next_to_use];
+ struct ena_rx_buffer *rx_info;
+
+ req_id = rx_ring->free_rx_ids[next_to_use];
+ rc = validate_rx_req_id(rx_ring, req_id);
+ if (unlikely(rc < 0))
+ break;
+
+ rx_info = &rx_ring->rx_buffer_info[req_id];
+
rc = ena_alloc_rx_page(rx_ring, rx_info,
__GFP_COLD | GFP_ATOMIC | __GFP_COMP);
@@ -483,7 +526,7 @@ static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num)
}
rc = ena_com_add_single_rx_desc(rx_ring->ena_com_io_sq,
&rx_info->ena_buf,
- next_to_use);
+ req_id);
if (unlikely(rc)) {
netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev,
"failed to add buffer for rx queue %d\n",
@@ -670,6 +713,7 @@ static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id)
u64_stats_update_end(&tx_ring->syncp);
/* Trigger device reset */
+ tx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_TX_REQ_ID;
set_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags);
return -EFAULT;
}
@@ -781,19 +825,42 @@ static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
return tx_pkts;
}
+static struct sk_buff *ena_alloc_skb(struct ena_ring *rx_ring, bool frags)
+{
+ struct sk_buff *skb;
+
+ if (frags)
+ skb = napi_get_frags(rx_ring->napi);
+ else
+ skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
+ rx_ring->rx_copybreak);
+
+ if (unlikely(!skb)) {
+ u64_stats_update_begin(&rx_ring->syncp);
+ rx_ring->rx_stats.skb_alloc_fail++;
+ u64_stats_update_end(&rx_ring->syncp);
+ netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
+ "Failed to allocate skb. frags: %d\n", frags);
+ return NULL;
+ }
+
+ return skb;
+}
+
static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
struct ena_com_rx_buf_info *ena_bufs,
u32 descs,
u16 *next_to_clean)
{
struct sk_buff *skb;
- struct ena_rx_buffer *rx_info =
- &rx_ring->rx_buffer_info[*next_to_clean];
- u32 len;
- u32 buf = 0;
+ struct ena_rx_buffer *rx_info;
+ u16 len, req_id, buf = 0;
void *va;
- len = ena_bufs[0].len;
+ len = ena_bufs[buf].len;
+ req_id = ena_bufs[buf].req_id;
+ rx_info = &rx_ring->rx_buffer_info[req_id];
+
if (unlikely(!rx_info->page)) {
netif_err(rx_ring->adapter, rx_err, rx_ring->netdev,
"Page is NULL\n");
@@ -809,16 +876,9 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
prefetch(va + NET_IP_ALIGN);
if (len <= rx_ring->rx_copybreak) {
- skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
- rx_ring->rx_copybreak);
- if (unlikely(!skb)) {
- u64_stats_update_begin(&rx_ring->syncp);
- rx_ring->rx_stats.skb_alloc_fail++;
- u64_stats_update_end(&rx_ring->syncp);
- netif_err(rx_ring->adapter, rx_err, rx_ring->netdev,
- "Failed to allocate skb\n");
+ skb = ena_alloc_skb(rx_ring, false);
+ if (unlikely(!skb))
return NULL;
- }
netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
"rx allocated small packet. len %d. data_len %d\n",
@@ -837,20 +897,15 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
skb_put(skb, len);
skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+ rx_ring->free_rx_ids[*next_to_clean] = req_id;
*next_to_clean = ENA_RX_RING_IDX_ADD(*next_to_clean, descs,
rx_ring->ring_size);
return skb;
}
- skb = napi_get_frags(rx_ring->napi);
- if (unlikely(!skb)) {
- netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
- "Failed allocating skb\n");
- u64_stats_update_begin(&rx_ring->syncp);
- rx_ring->rx_stats.skb_alloc_fail++;
- u64_stats_update_end(&rx_ring->syncp);
+ skb = ena_alloc_skb(rx_ring, true);
+ if (unlikely(!skb))
return NULL;
- }
do {
dma_unmap_page(rx_ring->dev,
@@ -865,13 +920,18 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
skb->len, skb->data_len);
rx_info->page = NULL;
+
+ rx_ring->free_rx_ids[*next_to_clean] = req_id;
*next_to_clean =
ENA_RX_RING_IDX_NEXT(*next_to_clean,
rx_ring->ring_size);
if (likely(--descs == 0))
break;
- rx_info = &rx_ring->rx_buffer_info[*next_to_clean];
- len = ena_bufs[++buf].len;
+
+ buf++;
+ len = ena_bufs[buf].len;
+ req_id = ena_bufs[buf].req_id;
+ rx_info = &rx_ring->rx_buffer_info[req_id];
} while (1);
return skb;
@@ -972,6 +1032,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
int rc = 0;
int total_len = 0;
int rx_copybreak_pkt = 0;
+ int i;
netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
"%s qid %d\n", __func__, rx_ring->qid);
@@ -1001,9 +1062,13 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
/* exit if we failed to retrieve a buffer */
if (unlikely(!skb)) {
- next_to_clean = ENA_RX_RING_IDX_ADD(next_to_clean,
- ena_rx_ctx.descs,
- rx_ring->ring_size);
+ for (i = 0; i < ena_rx_ctx.descs; i++) {
+ rx_ring->free_tx_ids[next_to_clean] =
+ rx_ring->ena_bufs[i].req_id;
+ next_to_clean =
+ ENA_RX_RING_IDX_NEXT(next_to_clean,
+ rx_ring->ring_size);
+ }
break;
}
@@ -1055,6 +1120,7 @@ error:
u64_stats_update_end(&rx_ring->syncp);
/* Too many desc from the device. Trigger reset */
+ adapter->reset_reason = ENA_REGS_RESET_TOO_MANY_RX_DESCS;
set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
return 0;
@@ -1208,14 +1274,25 @@ static irqreturn_t ena_intr_msix_io(int irq, void *data)
{
struct ena_napi *ena_napi = data;
- napi_schedule(&ena_napi->napi);
+ napi_schedule_irqoff(&ena_napi->napi);
return IRQ_HANDLED;
}
+/* Reserve a single MSI-X vector for management (admin + aenq),
+ * plus reserve one vector for each potential io queue.
+ * The number of potential io queues is the minimum of what the device
+ * supports and the number of vCPUs.
+ */
static int ena_enable_msix(struct ena_adapter *adapter, int num_queues)
{
- int msix_vecs, rc;
+ int msix_vecs, irq_cnt;
+
+ if (test_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) {
+ netif_err(adapter, probe, adapter->netdev,
+ "Error, MSI-X is already enabled\n");
+ return -EPERM;
+ }
/* Reserved the max msix vectors we might need */
msix_vecs = ENA_MAX_MSIX_VEC(num_queues);
@@ -1223,25 +1300,28 @@ static int ena_enable_msix(struct ena_adapter *adapter, int num_queues)
netif_dbg(adapter, probe, adapter->netdev,
"trying to enable MSI-X, vectors %d\n", msix_vecs);
- rc = pci_alloc_irq_vectors(adapter->pdev, msix_vecs, msix_vecs,
- PCI_IRQ_MSIX);
- if (rc < 0) {
+ irq_cnt = pci_alloc_irq_vectors(adapter->pdev, ENA_MIN_MSIX_VEC,
+ msix_vecs, PCI_IRQ_MSIX);
+
+ if (irq_cnt < 0) {
netif_err(adapter, probe, adapter->netdev,
- "Failed to enable MSI-X, vectors %d rc %d\n",
- msix_vecs, rc);
+ "Failed to enable MSI-X. irq_cnt %d\n", irq_cnt);
return -ENOSPC;
}
- netif_dbg(adapter, probe, adapter->netdev, "enable MSI-X, vectors %d\n",
- msix_vecs);
-
- if (msix_vecs >= 1) {
- if (ena_init_rx_cpu_rmap(adapter))
- netif_warn(adapter, probe, adapter->netdev,
- "Failed to map IRQs to CPUs\n");
+ if (irq_cnt != msix_vecs) {
+ netif_notice(adapter, probe, adapter->netdev,
+ "enable only %d MSI-X (out of %d), reduce the number of queues\n",
+ irq_cnt, msix_vecs);
+ adapter->num_queues = irq_cnt - ENA_ADMIN_MSIX_VEC;
}
- adapter->msix_vecs = msix_vecs;
+ if (ena_init_rx_cpu_rmap(adapter))
+ netif_warn(adapter, probe, adapter->netdev,
+ "Failed to map IRQs to CPUs\n");
+
+ adapter->msix_vecs = irq_cnt;
+ set_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags);
return 0;
}
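The ena_enable_msix() hunk above switches from an exact-count pci_alloc_irq_vectors() request to a ranged one, so the driver can come up with fewer vectors than ENA_MAX_MSIX_VEC(num_queues) and shrink its queue count to match. A minimal sketch of the same pattern, assuming a hypothetical my_adapter (not the driver's struct):

#include <linux/pci.h>

#define MY_MGMNT_VEC	1	/* one vector for admin + AENQ */

struct my_adapter {		/* illustrative stand-in */
	int num_queues;
	int msix_vecs;
};

static int my_enable_msix(struct pci_dev *pdev, struct my_adapter *adapter,
			  int num_queues)
{
	int want = MY_MGMNT_VEC + num_queues;
	int got;

	/* Ranged request: accept anything from "management + one queue"
	 * up to the full ask; the core grants the largest count it can.
	 */
	got = pci_alloc_irq_vectors(pdev, MY_MGMNT_VEC + 1, want,
				    PCI_IRQ_MSIX);
	if (got < 0)
		return -ENOSPC;

	/* Short allocation: shrink the queue count so every remaining
	 * I/O queue still owns a dedicated vector.
	 */
	if (got < want)
		adapter->num_queues = got - MY_MGMNT_VEC;

	adapter->msix_vecs = got;
	return 0;
}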
@@ -1318,6 +1398,12 @@ static int ena_request_io_irq(struct ena_adapter *adapter)
struct ena_irq *irq;
int rc = 0, i, k;
+ if (!test_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) {
+ netif_err(adapter, ifup, adapter->netdev,
+ "Failed to request I/O IRQ: MSI-X is not enabled\n");
+ return -EINVAL;
+ }
+
for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) {
irq = &adapter->irq_tbl[i];
rc = request_irq(irq->vector, irq->handler, flags, irq->name,
@@ -1376,6 +1462,12 @@ static void ena_free_io_irq(struct ena_adapter *adapter)
}
}
+static void ena_disable_msix(struct ena_adapter *adapter)
+{
+ if (test_and_clear_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags))
+ pci_free_irq_vectors(adapter->pdev);
+}
+
static void ena_disable_io_intr_sync(struct ena_adapter *adapter)
{
int i;
@@ -1446,7 +1538,7 @@ static int ena_rss_configure(struct ena_adapter *adapter)
/* In case the RSS table wasn't initialized by probe */
if (!ena_dev->rss.tbl_log_size) {
rc = ena_rss_init_default(adapter);
- if (rc && (rc != -EPERM)) {
+ if (rc && (rc != -EOPNOTSUPP)) {
netif_err(adapter, ifup, adapter->netdev,
"Failed to init RSS rc: %d\n", rc);
return rc;
@@ -1455,17 +1547,17 @@ static int ena_rss_configure(struct ena_adapter *adapter)
/* Set indirect table */
rc = ena_com_indirect_table_set(ena_dev);
- if (unlikely(rc && rc != -EPERM))
+ if (unlikely(rc && rc != -EOPNOTSUPP))
return rc;
/* Configure hash function (if supported) */
rc = ena_com_set_hash_function(ena_dev);
- if (unlikely(rc && (rc != -EPERM)))
+ if (unlikely(rc && (rc != -EOPNOTSUPP)))
return rc;
/* Configure hash inputs (if supported) */
rc = ena_com_set_hash_ctrl(ena_dev);
- if (unlikely(rc && (rc != -EPERM)))
+ if (unlikely(rc && (rc != -EOPNOTSUPP)))
return rc;
return 0;
@@ -1720,7 +1812,7 @@ static void ena_down(struct ena_adapter *adapter)
if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags)) {
int rc;
- rc = ena_com_dev_reset(adapter->ena_dev);
+ rc = ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason);
if (rc)
dev_err(&adapter->pdev->dev, "Device reset failed\n");
}
@@ -2144,7 +2236,7 @@ static void ena_config_host_info(struct ena_com_dev *ena_dev)
rc = ena_com_set_host_attributes(ena_dev);
if (rc) {
- if (rc == -EPERM)
+ if (rc == -EOPNOTSUPP)
pr_warn("Cannot set host attributes\n");
else
pr_err("Cannot set host attributes\n");
@@ -2181,7 +2273,7 @@ static void ena_config_debug_area(struct ena_adapter *adapter)
rc = ena_com_set_host_attributes(adapter->ena_dev);
if (rc) {
- if (rc == -EPERM)
+ if (rc == -EOPNOTSUPP)
netif_warn(adapter, drv, adapter->netdev,
"Cannot set host attributes\n");
else
@@ -2353,7 +2445,7 @@ static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev,
readless_supported = !(pdev->revision & ENA_MMIO_DISABLE_REG_READ);
ena_com_set_mmio_read_mode(ena_dev, readless_supported);
- rc = ena_com_dev_reset(ena_dev);
+ rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL);
if (rc) {
dev_err(dev, "Can not reset device\n");
goto err_mmio_read_less;
@@ -2464,7 +2556,8 @@ static int ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *adapter,
return 0;
err_disable_msix:
- pci_free_irq_vectors(adapter->pdev);
+ ena_disable_msix(adapter);
+
return rc;
}
@@ -2502,7 +2595,7 @@ static void ena_fw_reset_device(struct work_struct *work)
ena_free_mgmnt_irq(adapter);
- pci_free_irq_vectors(adapter->pdev);
+ ena_disable_msix(adapter);
ena_com_abort_admin_commands(ena_dev);
@@ -2512,6 +2605,7 @@ static void ena_fw_reset_device(struct work_struct *work)
ena_com_mmio_reg_read_request_destroy(ena_dev);
+ adapter->reset_reason = ENA_REGS_RESET_NORMAL;
clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
/* Finish with the destroy part. Start the init part */
@@ -2553,7 +2647,7 @@ static void ena_fw_reset_device(struct work_struct *work)
return;
err_disable_msix:
ena_free_mgmnt_irq(adapter);
- pci_free_irq_vectors(adapter->pdev);
+ ena_disable_msix(adapter);
err_device_destroy:
ena_com_admin_destroy(ena_dev);
err:
@@ -2577,7 +2671,7 @@ static int check_missing_comp_in_queue(struct ena_adapter *adapter,
tx_buf = &tx_ring->tx_buffer_info[i];
last_jiffies = tx_buf->last_jiffies;
if (unlikely(last_jiffies &&
- time_is_before_jiffies(last_jiffies + TX_TIMEOUT))) {
+ time_is_before_jiffies(last_jiffies + adapter->missing_tx_completion_to))) {
if (!tx_buf->print_once)
netif_notice(adapter, tx_err, adapter->netdev,
"Found a Tx that wasn't completed on time, qid %d, index %d.\n",
@@ -2586,10 +2680,13 @@ static int check_missing_comp_in_queue(struct ena_adapter *adapter,
tx_buf->print_once = 1;
missed_tx++;
- if (unlikely(missed_tx > MAX_NUM_OF_TIMEOUTED_PACKETS)) {
+ if (unlikely(missed_tx > adapter->missing_tx_completion_threshold)) {
netif_err(adapter, tx_err, adapter->netdev,
"The number of lost tx completions is above the threshold (%d > %d). Reset the device\n",
- missed_tx, MAX_NUM_OF_TIMEOUTED_PACKETS);
+ missed_tx,
+ adapter->missing_tx_completion_threshold);
+ adapter->reset_reason =
+ ENA_REGS_RESET_MISS_TX_CMPL;
set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
return -EIO;
}
@@ -2613,6 +2710,9 @@ static void check_for_missing_tx_completions(struct ena_adapter *adapter)
if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
return;
+ if (adapter->missing_tx_completion_to == ENA_HW_HINTS_NO_TIMEOUT)
+ return;
+
budget = ENA_MONITORED_TX_QUEUES;
for (i = adapter->last_monitored_tx_qid; i < adapter->num_queues; i++) {
@@ -2690,14 +2790,18 @@ static void check_for_missing_keep_alive(struct ena_adapter *adapter)
if (!adapter->wd_state)
return;
- keep_alive_expired = round_jiffies(adapter->last_keep_alive_jiffies
- + ENA_DEVICE_KALIVE_TIMEOUT);
+ if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT)
+ return;
+
+ keep_alive_expired = round_jiffies(adapter->last_keep_alive_jiffies +
+ adapter->keep_alive_timeout);
if (unlikely(time_is_before_jiffies(keep_alive_expired))) {
netif_err(adapter, drv, adapter->netdev,
"Keep alive watchdog timeout.\n");
u64_stats_update_begin(&adapter->syncp);
adapter->dev_stats.wd_expired++;
u64_stats_update_end(&adapter->syncp);
+ adapter->reset_reason = ENA_REGS_RESET_KEEP_ALIVE_TO;
set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
}
}
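check_for_missing_keep_alive() now honors a per-device timeout and bails out when the device reports the ENA_HW_HINTS_NO_TIMEOUT sentinel, disabling the watchdog entirely. The shape of the check in isolation, as a sketch (my_wd and MY_NO_TIMEOUT are illustrative stand-ins):

#include <linux/jiffies.h>
#include <linux/timer.h>

#define MY_NO_TIMEOUT	((unsigned long)-1)	/* "watchdog disabled" sentinel */

struct my_wd {				/* illustrative only */
	unsigned long last_heartbeat;	/* jiffies of last keep-alive */
	unsigned long timeout;		/* jiffies, or MY_NO_TIMEOUT */
};

static bool my_wd_expired(const struct my_wd *wd)
{
	if (wd->timeout == MY_NO_TIMEOUT)
		return false;	/* device asked us not to watch it */

	/* round_jiffies() lands the deadline on a whole-tick boundary;
	 * time_is_before_jiffies(x) is true once jiffies has passed x.
	 */
	return time_is_before_jiffies(round_jiffies(wd->last_heartbeat +
						    wd->timeout));
}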
@@ -2710,10 +2814,49 @@ static void check_for_admin_com_state(struct ena_adapter *adapter)
u64_stats_update_begin(&adapter->syncp);
adapter->dev_stats.admin_q_pause++;
u64_stats_update_end(&adapter->syncp);
+ adapter->reset_reason = ENA_REGS_RESET_ADMIN_TO;
set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
}
}
+static void ena_update_hints(struct ena_adapter *adapter,
+ struct ena_admin_ena_hw_hints *hints)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ if (hints->admin_completion_tx_timeout)
+ adapter->ena_dev->admin_queue.completion_timeout =
+ hints->admin_completion_tx_timeout * 1000;
+
+ if (hints->mmio_read_timeout)
+ /* convert to usec */
+ adapter->ena_dev->mmio_read.reg_read_to =
+ hints->mmio_read_timeout * 1000;
+
+ if (hints->missed_tx_completion_count_threshold_to_reset)
+ adapter->missing_tx_completion_threshold =
+ hints->missed_tx_completion_count_threshold_to_reset;
+
+ if (hints->missing_tx_completion_timeout) {
+ if (hints->missing_tx_completion_timeout == ENA_HW_HINTS_NO_TIMEOUT)
+ adapter->missing_tx_completion_to = ENA_HW_HINTS_NO_TIMEOUT;
+ else
+ adapter->missing_tx_completion_to =
+ msecs_to_jiffies(hints->missing_tx_completion_timeout);
+ }
+
+ if (hints->netdev_wd_timeout)
+ netdev->watchdog_timeo = msecs_to_jiffies(hints->netdev_wd_timeout);
+
+ if (hints->driver_watchdog_timeout) {
+ if (hints->driver_watchdog_timeout == ENA_HW_HINTS_NO_TIMEOUT)
+ adapter->keep_alive_timeout = ENA_HW_HINTS_NO_TIMEOUT;
+ else
+ adapter->keep_alive_timeout =
+ msecs_to_jiffies(hints->driver_watchdog_timeout);
+ }
+}
+
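ena_update_hints() above accepts all hints from the device in milliseconds and converts each to the unit its consumer expects: admin and MMIO timeouts to microseconds (* 1000), watchdog and Tx-completion timeouts to jiffies, with the ENA_HW_HINTS_NO_TIMEOUT sentinel passed through untranslated. A compact illustration of the conversion rule (hypothetical helper and sentinel value, not driver code):

#include <linux/jiffies.h>
#include <linux/types.h>

#define MY_NO_TIMEOUT	((u32)-1)	/* hypothetical sentinel value */

/* Convert a millisecond hint to jiffies, passing the "disabled"
 * sentinel through untranslated so consumers can test for it.
 */
static unsigned long my_hint_to_jiffies(u32 hint_ms)
{
	if (hint_ms == MY_NO_TIMEOUT)
		return MY_NO_TIMEOUT;

	return msecs_to_jiffies(hint_ms);
}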
static void ena_update_host_info(struct ena_admin_host_info *host_info,
struct net_device *netdev)
{
@@ -2886,7 +3029,7 @@ static int ena_rss_init_default(struct ena_adapter *adapter)
val = ethtool_rxfh_indir_default(i, adapter->num_queues);
rc = ena_com_indirect_table_fill_entry(ena_dev, i,
ENA_IO_RXQ_IDX(val));
- if (unlikely(rc && (rc != -EPERM))) {
+ if (unlikely(rc && (rc != -EOPNOTSUPP))) {
dev_err(dev, "Cannot fill indirect table\n");
goto err_fill_indir;
}
@@ -2894,13 +3037,13 @@ static int ena_rss_init_default(struct ena_adapter *adapter)
rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_CRC32, NULL,
ENA_HASH_KEY_SIZE, 0xFFFFFFFF);
- if (unlikely(rc && (rc != -EPERM))) {
+ if (unlikely(rc && (rc != -EOPNOTSUPP))) {
dev_err(dev, "Cannot fill hash function\n");
goto err_fill_indir;
}
rc = ena_com_set_default_hash_ctrl(ena_dev);
- if (unlikely(rc && (rc != -EPERM))) {
+ if (unlikely(rc && (rc != -EOPNOTSUPP))) {
dev_err(dev, "Cannot fill hash control\n");
goto err_fill_indir;
}
@@ -3076,6 +3219,7 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ena_set_conf_feat_params(adapter, &get_feat_ctx);
adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
+ adapter->reset_reason = ENA_REGS_RESET_NORMAL;
adapter->tx_ring_size = queue_size;
adapter->rx_ring_size = queue_size;
@@ -3114,7 +3258,7 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_worker_destroy;
}
rc = ena_rss_init_default(adapter);
- if (rc && (rc != -EPERM)) {
+ if (rc && (rc != -EOPNOTSUPP)) {
dev_err(&pdev->dev, "Cannot init RSS rc: %d\n", rc);
goto err_free_msix;
}
@@ -3136,6 +3280,11 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
INIT_WORK(&adapter->reset_task, ena_fw_reset_device);
adapter->last_keep_alive_jiffies = jiffies;
+ adapter->keep_alive_timeout = ENA_DEVICE_KALIVE_TIMEOUT;
+ adapter->missing_tx_completion_to = TX_TIMEOUT;
+ adapter->missing_tx_completion_threshold = MAX_NUM_OF_TIMEOUTED_PACKETS;
+
+ ena_update_hints(adapter, &get_feat_ctx.hw_hints);
setup_timer(&adapter->timer_service, ena_timer_service,
(unsigned long)adapter);
@@ -3155,9 +3304,9 @@ err_rss:
ena_com_delete_debug_area(ena_dev);
ena_com_rss_destroy(ena_dev);
err_free_msix:
- ena_com_dev_reset(ena_dev);
+ ena_com_dev_reset(ena_dev, ENA_REGS_RESET_INIT_ERR);
ena_free_mgmnt_irq(adapter);
- pci_free_irq_vectors(adapter->pdev);
+ ena_disable_msix(adapter);
err_worker_destroy:
ena_com_destroy_interrupt_moderation(ena_dev);
del_timer(&adapter->timer_service);
@@ -3238,11 +3387,11 @@ static void ena_remove(struct pci_dev *pdev)
/* Reset the device only if the device is running. */
if (test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
- ena_com_dev_reset(ena_dev);
+ ena_com_dev_reset(ena_dev, adapter->reset_reason);
ena_free_mgmnt_irq(adapter);
- pci_free_irq_vectors(adapter->pdev);
+ ena_disable_msix(adapter);
free_netdev(netdev);
@@ -3329,14 +3478,24 @@ static void ena_keep_alive_wd(void *adapter_data,
struct ena_admin_aenq_entry *aenq_e)
{
struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
+ struct ena_admin_aenq_keep_alive_desc *desc;
+ u64 rx_drops;
+ desc = (struct ena_admin_aenq_keep_alive_desc *)aenq_e;
adapter->last_keep_alive_jiffies = jiffies;
+
+ rx_drops = ((u64)desc->rx_drops_high << 32) | desc->rx_drops_low;
+
+ u64_stats_update_begin(&adapter->syncp);
+ adapter->dev_stats.rx_drops = rx_drops;
+ u64_stats_update_end(&adapter->syncp);
}
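ena_keep_alive_wd() now reassembles a 64-bit drop counter from the two 32-bit halves the device reports in the keep-alive descriptor. The widening cast before the shift is what keeps the high half from being shifted out of a 32-bit intermediate; a runnable plain-C illustration:

#include <stdint.h>
#include <stdio.h>

/* Combine two 32-bit register halves into one 64-bit counter.
 * Without the (uint64_t) cast, hi << 32 would be evaluated in
 * 32-bit arithmetic and the result would be undefined.
 */
static uint64_t combine_u64(uint32_t hi, uint32_t lo)
{
	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	/* e.g. hi=1, lo=2 -> 0x100000002 = 4294967298 drops */
	printf("%llu\n", (unsigned long long)combine_u64(1, 2));
	return 0;
}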
static void ena_notification(void *adapter_data,
struct ena_admin_aenq_entry *aenq_e)
{
struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
+ struct ena_admin_ena_hw_hints *hints;
WARN(aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION,
"Invalid group(%x) expected %x\n",
@@ -3354,6 +3513,11 @@ static void ena_notification(void *adapter_data,
case ENA_ADMIN_RESUME:
queue_work(ena_wq, &adapter->resume_io_task);
break;
+ case ENA_ADMIN_UPDATE_HINTS:
+ hints = (struct ena_admin_ena_hw_hints *)
+ (&aenq_e->inline_data_w4);
+ ena_update_hints(adapter, hints);
+ break;
default:
netif_err(adapter, drv, adapter->netdev,
"Invalid aenq notification link state %d\n",
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
index a4d3d5e21068..29bb5704260b 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
@@ -44,21 +44,24 @@
#include "ena_eth_com.h"
#define DRV_MODULE_VER_MAJOR 1
-#define DRV_MODULE_VER_MINOR 1
-#define DRV_MODULE_VER_SUBMINOR 7
+#define DRV_MODULE_VER_MINOR 2
+#define DRV_MODULE_VER_SUBMINOR 0
#define DRV_MODULE_NAME "ena"
#ifndef DRV_MODULE_VERSION
#define DRV_MODULE_VERSION \
__stringify(DRV_MODULE_VER_MAJOR) "." \
__stringify(DRV_MODULE_VER_MINOR) "." \
- __stringify(DRV_MODULE_VER_SUBMINOR)
+ __stringify(DRV_MODULE_VER_SUBMINOR) "k"
#endif
#define DEVICE_NAME "Elastic Network Adapter (ENA)"
/* 1 for AENQ + ADMIN */
-#define ENA_MAX_MSIX_VEC(io_queues) (1 + (io_queues))
+#define ENA_ADMIN_MSIX_VEC 1
+#define ENA_MAX_MSIX_VEC(io_queues) (ENA_ADMIN_MSIX_VEC + (io_queues))
+
+#define ENA_MIN_MSIX_VEC 2
#define ENA_REG_BAR 0
#define ENA_MEM_BAR 2
@@ -194,12 +197,19 @@ struct ena_stats_rx {
u64 dma_mapping_err;
u64 bad_desc_num;
u64 rx_copybreak_pkt;
+ u64 bad_req_id;
u64 empty_rx_ring;
};
struct ena_ring {
- /* Holds the empty requests for TX out of order completions */
- u16 *free_tx_ids;
+ union {
+ /* Holds the empty requests for TX/RX
+ * out of order completions
+ */
+ u16 *free_tx_ids;
+ u16 *free_rx_ids;
+ };
+
union {
struct ena_tx_buffer *tx_buffer_info;
struct ena_rx_buffer *rx_buffer_info;
@@ -260,6 +270,7 @@ enum ena_flags_t {
ENA_FLAG_DEVICE_RUNNING,
ENA_FLAG_DEV_UP,
ENA_FLAG_LINK_UP,
+ ENA_FLAG_MSIX_ENABLED,
ENA_FLAG_TRIGGER_RESET
};
@@ -280,6 +291,8 @@ struct ena_adapter {
int msix_vecs;
+ u32 missing_tx_completion_threshold;
+
u32 tx_usecs, rx_usecs; /* interrupt moderation */
u32 tx_frames, rx_frames; /* interrupt moderation */
@@ -293,6 +306,9 @@ struct ena_adapter {
u8 mac_addr[ETH_ALEN];
+ unsigned long keep_alive_timeout;
+ unsigned long missing_tx_completion_to;
+
char name[ENA_NAME_MAX_LEN];
unsigned long flags;
@@ -322,6 +338,8 @@ struct ena_adapter {
/* last queue index that was checked for uncompleted tx packets */
u32 last_monitored_tx_qid;
+
+ enum ena_regs_reset_reason_types reset_reason;
};
void ena_set_ethtool_ops(struct net_device *netdev);
diff --git a/drivers/net/ethernet/amazon/ena/ena_regs_defs.h b/drivers/net/ethernet/amazon/ena/ena_regs_defs.h
index 26097a2b6030..9aec43c5bba8 100644
--- a/drivers/net/ethernet/amazon/ena/ena_regs_defs.h
+++ b/drivers/net/ethernet/amazon/ena/ena_regs_defs.h
@@ -32,6 +32,36 @@
#ifndef _ENA_REGS_H_
#define _ENA_REGS_H_
+enum ena_regs_reset_reason_types {
+ ENA_REGS_RESET_NORMAL = 0,
+
+ ENA_REGS_RESET_KEEP_ALIVE_TO = 1,
+
+ ENA_REGS_RESET_ADMIN_TO = 2,
+
+ ENA_REGS_RESET_MISS_TX_CMPL = 3,
+
+ ENA_REGS_RESET_INV_RX_REQ_ID = 4,
+
+ ENA_REGS_RESET_INV_TX_REQ_ID = 5,
+
+ ENA_REGS_RESET_TOO_MANY_RX_DESCS = 6,
+
+ ENA_REGS_RESET_INIT_ERR = 7,
+
+ ENA_REGS_RESET_DRIVER_INVALID_STATE = 8,
+
+ ENA_REGS_RESET_OS_TRIGGER = 9,
+
+ ENA_REGS_RESET_OS_NETDEV_WD = 10,
+
+ ENA_REGS_RESET_SHUTDOWN = 11,
+
+ ENA_REGS_RESET_USER_TRIGGER = 12,
+
+ ENA_REGS_RESET_GENERIC = 13,
+};
+
/* ena_registers offsets */
#define ENA_REGS_VERSION_OFF 0x0
#define ENA_REGS_CONTROLLER_VERSION_OFF 0x4
@@ -78,6 +108,8 @@
#define ENA_REGS_CAPS_RESET_TIMEOUT_MASK 0x3e
#define ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT 8
#define ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK 0xff00
+#define ENA_REGS_CAPS_ADMIN_CMD_TO_SHIFT 16
+#define ENA_REGS_CAPS_ADMIN_CMD_TO_MASK 0xf0000
/* aq_caps register */
#define ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK 0xffff
@@ -102,6 +134,8 @@
#define ENA_REGS_DEV_CTL_QUIESCENT_MASK 0x4
#define ENA_REGS_DEV_CTL_IO_RESUME_SHIFT 3
#define ENA_REGS_DEV_CTL_IO_RESUME_MASK 0x8
+#define ENA_REGS_DEV_CTL_RESET_REASON_SHIFT 28
+#define ENA_REGS_DEV_CTL_RESET_REASON_MASK 0xf0000000
/* dev_sts register */
#define ENA_REGS_DEV_STS_READY_MASK 0x1
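The new reset-reason enum and the DEV_CTL shift/mask pair let the driver tell the device why it is resetting: a 4-bit reason placed in bits 31:28 of the dev_ctl register, presumably consumed by ena_com_dev_reset(), which now takes the reason as a parameter in the hunks above. A standalone sketch of the field packing, reusing the constants from this header:

#include <stdint.h>
#include <stdio.h>

#define RESET_REASON_SHIFT	28		/* ENA_REGS_DEV_CTL_RESET_REASON_SHIFT */
#define RESET_REASON_MASK	0xf0000000u	/* ENA_REGS_DEV_CTL_RESET_REASON_MASK */
#define RESET_KEEP_ALIVE_TO	1		/* ENA_REGS_RESET_KEEP_ALIVE_TO */

/* Pack a reset reason into a dev_ctl value without disturbing
 * the other control bits.
 */
static uint32_t pack_reset_reason(uint32_t dev_ctl, uint32_t reason)
{
	dev_ctl &= ~RESET_REASON_MASK;
	dev_ctl |= (reason << RESET_REASON_SHIFT) & RESET_REASON_MASK;
	return dev_ctl;
}

int main(void)
{
	/* reason 1 lands in bits 31:28 -> 0x10000000 */
	printf("0x%08x\n", pack_reset_reason(0, RESET_KEEP_ALIVE_TO));
	return 0;
}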
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index 86369d7c9a0f..7f60d17819ce 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -731,12 +731,10 @@ static int pcnet32_get_link_ksettings(struct net_device *dev,
{
struct pcnet32_private *lp = netdev_priv(dev);
unsigned long flags;
- int r = -EOPNOTSUPP;
spin_lock_irqsave(&lp->lock, flags);
if (lp->mii) {
mii_ethtool_get_link_ksettings(&lp->mii_if, cmd);
- r = 0;
} else if (lp->chip_version == PCNET32_79C970A) {
if (lp->autoneg) {
cmd->base.autoneg = AUTONEG_ENABLE;
@@ -753,10 +751,9 @@ static int pcnet32_get_link_ksettings(struct net_device *dev,
ethtool_convert_legacy_u32_to_link_mode(
cmd->link_modes.supported,
SUPPORTED_TP | SUPPORTED_AUI);
- r = 0;
}
spin_unlock_irqrestore(&lp->lock, flags);
- return r;
+ return 0;
}
static int pcnet32_set_link_ksettings(struct net_device *dev,
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
index 127adbeefb10..9795419aac2d 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
@@ -123,38 +123,13 @@
#define DMA_ISR 0x3008
#define DMA_AXIARCR 0x3010
#define DMA_AXIAWCR 0x3018
+#define DMA_AXIAWARCR 0x301c
#define DMA_DSR0 0x3020
#define DMA_DSR1 0x3024
+#define DMA_TXEDMACR 0x3040
+#define DMA_RXEDMACR 0x3044
/* DMA register entry bit positions and sizes */
-#define DMA_AXIARCR_DRC_INDEX 0
-#define DMA_AXIARCR_DRC_WIDTH 4
-#define DMA_AXIARCR_DRD_INDEX 4
-#define DMA_AXIARCR_DRD_WIDTH 2
-#define DMA_AXIARCR_TEC_INDEX 8
-#define DMA_AXIARCR_TEC_WIDTH 4
-#define DMA_AXIARCR_TED_INDEX 12
-#define DMA_AXIARCR_TED_WIDTH 2
-#define DMA_AXIARCR_THC_INDEX 16
-#define DMA_AXIARCR_THC_WIDTH 4
-#define DMA_AXIARCR_THD_INDEX 20
-#define DMA_AXIARCR_THD_WIDTH 2
-#define DMA_AXIAWCR_DWC_INDEX 0
-#define DMA_AXIAWCR_DWC_WIDTH 4
-#define DMA_AXIAWCR_DWD_INDEX 4
-#define DMA_AXIAWCR_DWD_WIDTH 2
-#define DMA_AXIAWCR_RPC_INDEX 8
-#define DMA_AXIAWCR_RPC_WIDTH 4
-#define DMA_AXIAWCR_RPD_INDEX 12
-#define DMA_AXIAWCR_RPD_WIDTH 2
-#define DMA_AXIAWCR_RHC_INDEX 16
-#define DMA_AXIAWCR_RHC_WIDTH 4
-#define DMA_AXIAWCR_RHD_INDEX 20
-#define DMA_AXIAWCR_RHD_WIDTH 2
-#define DMA_AXIAWCR_TDC_INDEX 24
-#define DMA_AXIAWCR_TDC_WIDTH 4
-#define DMA_AXIAWCR_TDD_INDEX 28
-#define DMA_AXIAWCR_TDD_WIDTH 2
#define DMA_ISR_MACIS_INDEX 17
#define DMA_ISR_MACIS_WIDTH 1
#define DMA_ISR_MTLIS_INDEX 16
@@ -163,14 +138,31 @@
#define DMA_MR_INTM_WIDTH 2
#define DMA_MR_SWR_INDEX 0
#define DMA_MR_SWR_WIDTH 1
+#define DMA_RXEDMACR_RDPS_INDEX 0
+#define DMA_RXEDMACR_RDPS_WIDTH 3
+#define DMA_SBMR_AAL_INDEX 12
+#define DMA_SBMR_AAL_WIDTH 1
#define DMA_SBMR_EAME_INDEX 11
#define DMA_SBMR_EAME_WIDTH 1
-#define DMA_SBMR_BLEN_256_INDEX 7
-#define DMA_SBMR_BLEN_256_WIDTH 1
+#define DMA_SBMR_BLEN_INDEX 1
+#define DMA_SBMR_BLEN_WIDTH 7
+#define DMA_SBMR_RD_OSR_LMT_INDEX 16
+#define DMA_SBMR_RD_OSR_LMT_WIDTH 6
#define DMA_SBMR_UNDEF_INDEX 0
#define DMA_SBMR_UNDEF_WIDTH 1
+#define DMA_SBMR_WR_OSR_LMT_INDEX 24
+#define DMA_SBMR_WR_OSR_LMT_WIDTH 6
+#define DMA_TXEDMACR_TDPS_INDEX 0
+#define DMA_TXEDMACR_TDPS_WIDTH 3
/* DMA register values */
+#define DMA_SBMR_BLEN_256 256
+#define DMA_SBMR_BLEN_128 128
+#define DMA_SBMR_BLEN_64 64
+#define DMA_SBMR_BLEN_32 32
+#define DMA_SBMR_BLEN_16 16
+#define DMA_SBMR_BLEN_8 8
+#define DMA_SBMR_BLEN_4 4
#define DMA_DSR_RPS_WIDTH 4
#define DMA_DSR_TPS_WIDTH 4
#define DMA_DSR_Q_WIDTH (DMA_DSR_RPS_WIDTH + DMA_DSR_TPS_WIDTH)
@@ -959,6 +951,7 @@
#define XP_DRIVER_INT_RO 0x0064
#define XP_DRIVER_SCRATCH_0 0x0068
#define XP_DRIVER_SCRATCH_1 0x006c
+#define XP_INT_REISSUE_EN 0x0074
#define XP_INT_EN 0x0078
#define XP_I2C_MUTEX 0x0080
#define XP_MDIO_MUTEX 0x0084
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
index 0a98c369df20..45d92304068e 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
@@ -176,8 +176,8 @@ static void xgbe_free_ring_resources(struct xgbe_prv_data *pdata)
DBGPR("-->xgbe_free_ring_resources\n");
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++) {
+ for (i = 0; i < pdata->channel_count; i++) {
+ channel = pdata->channel[i];
xgbe_free_ring(pdata, channel->tx_ring);
xgbe_free_ring(pdata, channel->rx_ring);
}
@@ -185,34 +185,60 @@ static void xgbe_free_ring_resources(struct xgbe_prv_data *pdata)
DBGPR("<--xgbe_free_ring_resources\n");
}
+static void *xgbe_alloc_node(size_t size, int node)
+{
+ void *mem;
+
+ mem = kzalloc_node(size, GFP_KERNEL, node);
+ if (!mem)
+ mem = kzalloc(size, GFP_KERNEL);
+
+ return mem;
+}
+
+static void *xgbe_dma_alloc_node(struct device *dev, size_t size,
+ dma_addr_t *dma, int node)
+{
+ void *mem;
+ int cur_node = dev_to_node(dev);
+
+ set_dev_node(dev, node);
+ mem = dma_alloc_coherent(dev, size, dma, GFP_KERNEL);
+ set_dev_node(dev, cur_node);
+
+ if (!mem)
+ mem = dma_alloc_coherent(dev, size, dma, GFP_KERNEL);
+
+ return mem;
+}
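Both helpers above try the ring's home node first and quietly fall back to an unconstrained allocation, so an exhausted or memoryless node degrades locality instead of failing ring setup. The DMA variant has to route the node preference through the device, since dma_alloc_coherent() takes no node argument; temporarily overriding the device's node with set_dev_node() is the idiom for that. A usage sketch under those assumptions (my_ring is an illustrative stand-in):

/* Illustrative caller: allocate per-ring metadata near the ring's
 * NUMA node, tolerating fallback to any node.
 */
struct my_ring {			/* stand-in, not the driver struct */
	void *rdata;
	int node;
};

static int my_ring_init(struct my_ring *ring, size_t entries, size_t esz)
{
	ring->rdata = xgbe_alloc_node(entries * esz, ring->node);
	if (!ring->rdata)
		return -ENOMEM;	/* both node-local and global failed */
	return 0;
}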
+
static int xgbe_init_ring(struct xgbe_prv_data *pdata,
struct xgbe_ring *ring, unsigned int rdesc_count)
{
- DBGPR("-->xgbe_init_ring\n");
+ size_t size;
if (!ring)
return 0;
/* Descriptors */
+ size = rdesc_count * sizeof(struct xgbe_ring_desc);
+
ring->rdesc_count = rdesc_count;
- ring->rdesc = dma_alloc_coherent(pdata->dev,
- (sizeof(struct xgbe_ring_desc) *
- rdesc_count), &ring->rdesc_dma,
- GFP_KERNEL);
+ ring->rdesc = xgbe_dma_alloc_node(pdata->dev, size, &ring->rdesc_dma,
+ ring->node);
if (!ring->rdesc)
return -ENOMEM;
/* Descriptor information */
- ring->rdata = kcalloc(rdesc_count, sizeof(struct xgbe_ring_data),
- GFP_KERNEL);
+ size = rdesc_count * sizeof(struct xgbe_ring_data);
+
+ ring->rdata = xgbe_alloc_node(size, ring->node);
if (!ring->rdata)
return -ENOMEM;
netif_dbg(pdata, drv, pdata->netdev,
- "rdesc=%p, rdesc_dma=%pad, rdata=%p\n",
- ring->rdesc, &ring->rdesc_dma, ring->rdata);
-
- DBGPR("<--xgbe_init_ring\n");
+ "rdesc=%p, rdesc_dma=%pad, rdata=%p, node=%d\n",
+ ring->rdesc, &ring->rdesc_dma, ring->rdata, ring->node);
return 0;
}
@@ -223,10 +249,8 @@ static int xgbe_alloc_ring_resources(struct xgbe_prv_data *pdata)
unsigned int i;
int ret;
- DBGPR("-->xgbe_alloc_ring_resources\n");
-
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++) {
+ for (i = 0; i < pdata->channel_count; i++) {
+ channel = pdata->channel[i];
netif_dbg(pdata, drv, pdata->netdev, "%s - Tx ring:\n",
channel->name);
@@ -250,8 +274,6 @@ static int xgbe_alloc_ring_resources(struct xgbe_prv_data *pdata)
}
}
- DBGPR("<--xgbe_alloc_ring_resources\n");
-
return 0;
err_ring:
@@ -261,21 +283,33 @@ err_ring:
}
static int xgbe_alloc_pages(struct xgbe_prv_data *pdata,
- struct xgbe_page_alloc *pa, gfp_t gfp, int order)
+ struct xgbe_page_alloc *pa, int alloc_order,
+ int node)
{
struct page *pages = NULL;
dma_addr_t pages_dma;
- int ret;
+ gfp_t gfp;
+ int order, ret;
+
+again:
+ order = alloc_order;
/* Try to obtain pages, decreasing order if necessary */
- gfp |= __GFP_COLD | __GFP_COMP | __GFP_NOWARN;
+ gfp = GFP_ATOMIC | __GFP_COLD | __GFP_COMP | __GFP_NOWARN;
while (order >= 0) {
- pages = alloc_pages(gfp, order);
+ pages = alloc_pages_node(node, gfp, order);
if (pages)
break;
order--;
}
+
+ /* If we couldn't get local pages, try getting from anywhere */
+ if (!pages && (node != NUMA_NO_NODE)) {
+ node = NUMA_NO_NODE;
+ goto again;
+ }
+
if (!pages)
return -ENOMEM;
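xgbe_alloc_pages() now takes the preferred node and retries the whole descending-order scan once with NUMA_NO_NODE before giving up, so order degradation and node fallback compose. The control flow in isolation, dropping the DMA mapping the real function also performs (a sketch, not the driver code):

#include <linux/gfp.h>
#include <linux/numa.h>

/* Try progressively smaller compound pages on a preferred node, then
 * repeat the scan with no node preference.  Returns NULL only when
 * even an order-0 page is unavailable everywhere.
 */
static struct page *alloc_pages_best_effort(int node, int max_order)
{
	gfp_t gfp = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN;
	struct page *pages;
	int order;

again:
	for (order = max_order; order >= 0; order--) {
		pages = alloc_pages_node(node, gfp, order);
		if (pages)
			return pages;
	}

	if (node != NUMA_NO_NODE) {
		node = NUMA_NO_NODE;
		goto again;
	}

	return NULL;
}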
@@ -327,14 +361,14 @@ static int xgbe_map_rx_buffer(struct xgbe_prv_data *pdata,
int ret;
if (!ring->rx_hdr_pa.pages) {
- ret = xgbe_alloc_pages(pdata, &ring->rx_hdr_pa, GFP_ATOMIC, 0);
+ ret = xgbe_alloc_pages(pdata, &ring->rx_hdr_pa, 0, ring->node);
if (ret)
return ret;
}
if (!ring->rx_buf_pa.pages) {
- ret = xgbe_alloc_pages(pdata, &ring->rx_buf_pa, GFP_ATOMIC,
- PAGE_ALLOC_COSTLY_ORDER);
+ ret = xgbe_alloc_pages(pdata, &ring->rx_buf_pa,
+ PAGE_ALLOC_COSTLY_ORDER, ring->node);
if (ret)
return ret;
}
@@ -362,8 +396,8 @@ static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
DBGPR("-->xgbe_wrapper_tx_descriptor_init\n");
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++) {
+ for (i = 0; i < pdata->channel_count; i++) {
+ channel = pdata->channel[i];
ring = channel->tx_ring;
if (!ring)
break;
@@ -403,8 +437,8 @@ static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
DBGPR("-->xgbe_wrapper_rx_descriptor_init\n");
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++) {
+ for (i = 0; i < pdata->channel_count; i++) {
+ channel = pdata->channel[i];
ring = channel->rx_ring;
if (!ring)
break;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index 24a687ce4388..06f953e1e9b2 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -174,58 +174,30 @@ static unsigned int xgbe_riwt_to_usec(struct xgbe_prv_data *pdata,
return ret;
}
-static int xgbe_config_pblx8(struct xgbe_prv_data *pdata)
+static int xgbe_config_pbl_val(struct xgbe_prv_data *pdata)
{
- struct xgbe_channel *channel;
+ unsigned int pblx8, pbl;
unsigned int i;
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++)
- XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_CR, PBLX8,
- pdata->pblx8);
-
- return 0;
-}
-
-static int xgbe_get_tx_pbl_val(struct xgbe_prv_data *pdata)
-{
- return XGMAC_DMA_IOREAD_BITS(pdata->channel, DMA_CH_TCR, PBL);
-}
-
-static int xgbe_config_tx_pbl_val(struct xgbe_prv_data *pdata)
-{
- struct xgbe_channel *channel;
- unsigned int i;
-
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++) {
- if (!channel->tx_ring)
- break;
+ pblx8 = DMA_PBL_X8_DISABLE;
+ pbl = pdata->pbl;
- XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, PBL,
- pdata->tx_pbl);
+ if (pdata->pbl > 32) {
+ pblx8 = DMA_PBL_X8_ENABLE;
+ pbl >>= 3;
}
- return 0;
-}
-
-static int xgbe_get_rx_pbl_val(struct xgbe_prv_data *pdata)
-{
- return XGMAC_DMA_IOREAD_BITS(pdata->channel, DMA_CH_RCR, PBL);
-}
-
-static int xgbe_config_rx_pbl_val(struct xgbe_prv_data *pdata)
-{
- struct xgbe_channel *channel;
- unsigned int i;
+ for (i = 0; i < pdata->channel_count; i++) {
+ XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_CR, PBLX8,
+ pblx8);
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++) {
- if (!channel->rx_ring)
- break;
+ if (pdata->channel[i]->tx_ring)
+ XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR,
+ PBL, pbl);
- XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, PBL,
- pdata->rx_pbl);
+ if (pdata->channel[i]->rx_ring)
+ XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR,
+ PBL, pbl);
}
return 0;
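The consolidated xgbe_config_pbl_val() programs one configured burst length for both directions: values above 32 cannot be represented directly in the PBL field, so the hardware's PBLx8 multiplier is enabled and the programmed value divided by 8. The arithmetic on its own, runnable in plain C:

#include <stdio.h>

/* Split a desired PBL into (pblx8 flag, register value).  PBL fields
 * top out at 32, so larger bursts are expressed as value * 8.
 */
static void encode_pbl(unsigned int pbl, unsigned int *pblx8,
		       unsigned int *regval)
{
	if (pbl > 32) {
		*pblx8 = 1;
		*regval = pbl >> 3;	/* e.g. 128 -> 16, meaning 16 * 8 */
	} else {
		*pblx8 = 0;
		*regval = pbl;
	}
}

int main(void)
{
	unsigned int x8, val;

	encode_pbl(128, &x8, &val);	/* the new default from xgbe-main.c */
	printf("pblx8=%u pbl=%u\n", x8, val);	/* pblx8=1 pbl=16 */
	return 0;
}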
@@ -233,15 +205,13 @@ static int xgbe_config_rx_pbl_val(struct xgbe_prv_data *pdata)
static int xgbe_config_osp_mode(struct xgbe_prv_data *pdata)
{
- struct xgbe_channel *channel;
unsigned int i;
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++) {
- if (!channel->tx_ring)
+ for (i = 0; i < pdata->channel_count; i++) {
+ if (!pdata->channel[i]->tx_ring)
break;
- XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, OSP,
+ XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, OSP,
pdata->tx_osp_mode);
}
@@ -292,15 +262,13 @@ static int xgbe_config_tx_threshold(struct xgbe_prv_data *pdata,
static int xgbe_config_rx_coalesce(struct xgbe_prv_data *pdata)
{
- struct xgbe_channel *channel;
unsigned int i;
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++) {
- if (!channel->rx_ring)
+ for (i = 0; i < pdata->channel_count; i++) {
+ if (!pdata->channel[i]->rx_ring)
break;
- XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RIWT, RWT,
+ XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RIWT, RWT,
pdata->rx_riwt);
}
@@ -314,44 +282,38 @@ static int xgbe_config_tx_coalesce(struct xgbe_prv_data *pdata)
static void xgbe_config_rx_buffer_size(struct xgbe_prv_data *pdata)
{
- struct xgbe_channel *channel;
unsigned int i;
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++) {
- if (!channel->rx_ring)
+ for (i = 0; i < pdata->channel_count; i++) {
+ if (!pdata->channel[i]->rx_ring)
break;
- XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, RBSZ,
+ XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, RBSZ,
pdata->rx_buf_size);
}
}
static void xgbe_config_tso_mode(struct xgbe_prv_data *pdata)
{
- struct xgbe_channel *channel;
unsigned int i;
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++) {
- if (!channel->tx_ring)
+ for (i = 0; i < pdata->channel_count; i++) {
+ if (!pdata->channel[i]->tx_ring)
break;
- XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, TSE, 1);
+ XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, TSE, 1);
}
}
static void xgbe_config_sph_mode(struct xgbe_prv_data *pdata)
{
- struct xgbe_channel *channel;
unsigned int i;
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++) {
- if (!channel->rx_ring)
+ for (i = 0; i < pdata->channel_count; i++) {
+ if (!pdata->channel[i]->rx_ring)
break;
- XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_CR, SPH, 1);
+ XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_CR, SPH, 1);
}
XGMAC_IOWRITE_BITS(pdata, MAC_RCR, HDSMS, XGBE_SPH_HDSMS_SIZE);
@@ -651,8 +613,9 @@ static void xgbe_enable_dma_interrupts(struct xgbe_prv_data *pdata)
XGMAC_IOWRITE_BITS(pdata, DMA_MR, INTM,
pdata->channel_irq_mode);
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++) {
+ for (i = 0; i < pdata->channel_count; i++) {
+ channel = pdata->channel[i];
+
/* Clear all the interrupts which are set */
dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR);
XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_ch_isr);
@@ -1497,26 +1460,37 @@ static void xgbe_rx_desc_init(struct xgbe_channel *channel)
static void xgbe_update_tstamp_addend(struct xgbe_prv_data *pdata,
unsigned int addend)
{
+ unsigned int count = 10000;
+
/* Set the addend register value and tell the device */
XGMAC_IOWRITE(pdata, MAC_TSAR, addend);
XGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSADDREG, 1);
/* Wait for addend update to complete */
- while (XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSADDREG))
+ while (--count && XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSADDREG))
udelay(5);
+
+ if (!count)
+ netdev_err(pdata->netdev,
+ "timed out updating timestamp addend register\n");
}
static void xgbe_set_tstamp_time(struct xgbe_prv_data *pdata, unsigned int sec,
unsigned int nsec)
{
+ unsigned int count = 10000;
+
/* Set the time values and tell the device */
XGMAC_IOWRITE(pdata, MAC_STSUR, sec);
XGMAC_IOWRITE(pdata, MAC_STNUR, nsec);
XGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSINIT, 1);
/* Wait for time update to complete */
- while (XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSINIT))
+ while (--count && XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSINIT))
udelay(5);
+
+ if (!count)
+ netdev_err(pdata->netdev, "timed out initializing timestamp\n");
}
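Both timestamp helpers replace an unbounded busy-wait on a self-clearing bit with a bounded one (10000 iterations * 5 us, roughly 50 ms worst case) plus an error log, so a wedged device can no longer hang the CPU. The generic shape of the pattern, as a sketch (read_busy() is a stand-in for the register read):

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>

/* Poll a self-clearing condition with an upper bound.  Returns 0 on
 * success, -ETIMEDOUT if the hardware never deasserts the bit.
 */
static int poll_bounded(bool (*read_busy)(void *), void *ctx)
{
	unsigned int count = 10000;

	while (--count && read_busy(ctx))
		udelay(5);

	return count ? 0 : -ETIMEDOUT;
}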
static u64 xgbe_get_tstamp_time(struct xgbe_prv_data *pdata)
@@ -2140,37 +2114,38 @@ static int xgbe_flush_tx_queues(struct xgbe_prv_data *pdata)
static void xgbe_config_dma_bus(struct xgbe_prv_data *pdata)
{
+ unsigned int sbmr;
+
+ sbmr = XGMAC_IOREAD(pdata, DMA_SBMR);
+
/* Set enhanced addressing mode */
- XGMAC_IOWRITE_BITS(pdata, DMA_SBMR, EAME, 1);
+ XGMAC_SET_BITS(sbmr, DMA_SBMR, EAME, 1);
/* Set the System Bus mode */
- XGMAC_IOWRITE_BITS(pdata, DMA_SBMR, UNDEF, 1);
- XGMAC_IOWRITE_BITS(pdata, DMA_SBMR, BLEN_256, 1);
+ XGMAC_SET_BITS(sbmr, DMA_SBMR, UNDEF, 1);
+ XGMAC_SET_BITS(sbmr, DMA_SBMR, BLEN, pdata->blen >> 2);
+ XGMAC_SET_BITS(sbmr, DMA_SBMR, AAL, pdata->aal);
+ XGMAC_SET_BITS(sbmr, DMA_SBMR, RD_OSR_LMT, pdata->rd_osr_limit - 1);
+ XGMAC_SET_BITS(sbmr, DMA_SBMR, WR_OSR_LMT, pdata->wr_osr_limit - 1);
+
+ XGMAC_IOWRITE(pdata, DMA_SBMR, sbmr);
+
+ /* Set descriptor fetching threshold */
+ if (pdata->vdata->tx_desc_prefetch)
+ XGMAC_IOWRITE_BITS(pdata, DMA_TXEDMACR, TDPS,
+ pdata->vdata->tx_desc_prefetch);
+
+ if (pdata->vdata->rx_desc_prefetch)
+ XGMAC_IOWRITE_BITS(pdata, DMA_RXEDMACR, RDPS,
+ pdata->vdata->rx_desc_prefetch);
}
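In the SBMR write above, the burst-length field appears to be one-hot: per the new header constants it is 7 bits wide starting at bit 1, and dividing the DMA_SBMR_BLEN_* byte count by 4 selects the matching bit (BLEN4 at field bit 0 up to BLEN256 at field bit 6). The outstanding-request limits are programmed as N-1. A plain-C illustration of that encoding, under the one-hot assumption:

#include <stdio.h>

#define BLEN_INDEX	1	/* DMA_SBMR_BLEN_INDEX */
#define BLEN_WIDTH	7	/* DMA_SBMR_BLEN_WIDTH */

static unsigned int sbmr_blen_bits(unsigned int blen_bytes)
{
	unsigned int field = blen_bytes >> 2;	/* 64 -> 0x10 */

	return (field & ((1u << BLEN_WIDTH) - 1)) << BLEN_INDEX;
}

int main(void)
{
	/* The default blen of 64 bytes sets register bit 5 (BLEN64). */
	printf("0x%02x\n", sbmr_blen_bits(64));	/* prints 0x20 */
	return 0;
}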
static void xgbe_config_dma_cache(struct xgbe_prv_data *pdata)
{
- unsigned int arcache, awcache;
-
- arcache = 0;
- XGMAC_SET_BITS(arcache, DMA_AXIARCR, DRC, pdata->arcache);
- XGMAC_SET_BITS(arcache, DMA_AXIARCR, DRD, pdata->axdomain);
- XGMAC_SET_BITS(arcache, DMA_AXIARCR, TEC, pdata->arcache);
- XGMAC_SET_BITS(arcache, DMA_AXIARCR, TED, pdata->axdomain);
- XGMAC_SET_BITS(arcache, DMA_AXIARCR, THC, pdata->arcache);
- XGMAC_SET_BITS(arcache, DMA_AXIARCR, THD, pdata->axdomain);
- XGMAC_IOWRITE(pdata, DMA_AXIARCR, arcache);
-
- awcache = 0;
- XGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWC, pdata->awcache);
- XGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWD, pdata->axdomain);
- XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPC, pdata->awcache);
- XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPD, pdata->axdomain);
- XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHC, pdata->awcache);
- XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHD, pdata->axdomain);
- XGMAC_SET_BITS(awcache, DMA_AXIAWCR, TDC, pdata->awcache);
- XGMAC_SET_BITS(awcache, DMA_AXIAWCR, TDD, pdata->axdomain);
- XGMAC_IOWRITE(pdata, DMA_AXIAWCR, awcache);
+ XGMAC_IOWRITE(pdata, DMA_AXIARCR, pdata->arcr);
+ XGMAC_IOWRITE(pdata, DMA_AXIAWCR, pdata->awcr);
+ if (pdata->awarcr)
+ XGMAC_IOWRITE(pdata, DMA_AXIAWARCR, pdata->awarcr);
}
static void xgbe_config_mtl_mode(struct xgbe_prv_data *pdata)
@@ -3202,16 +3177,14 @@ static void xgbe_prepare_tx_stop(struct xgbe_prv_data *pdata,
static void xgbe_enable_tx(struct xgbe_prv_data *pdata)
{
- struct xgbe_channel *channel;
unsigned int i;
/* Enable each Tx DMA channel */
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++) {
- if (!channel->tx_ring)
+ for (i = 0; i < pdata->channel_count; i++) {
+ if (!pdata->channel[i]->tx_ring)
break;
- XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 1);
+ XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, ST, 1);
}
/* Enable each Tx queue */
@@ -3225,7 +3198,6 @@ static void xgbe_enable_tx(struct xgbe_prv_data *pdata)
static void xgbe_disable_tx(struct xgbe_prv_data *pdata)
{
- struct xgbe_channel *channel;
unsigned int i;
/* Prepare for Tx DMA channel stop */
@@ -3240,12 +3212,11 @@ static void xgbe_disable_tx(struct xgbe_prv_data *pdata)
XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN, 0);
/* Disable each Tx DMA channel */
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++) {
- if (!channel->tx_ring)
+ for (i = 0; i < pdata->channel_count; i++) {
+ if (!pdata->channel[i]->tx_ring)
break;
- XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 0);
+ XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, ST, 0);
}
}
@@ -3277,16 +3248,14 @@ static void xgbe_prepare_rx_stop(struct xgbe_prv_data *pdata,
static void xgbe_enable_rx(struct xgbe_prv_data *pdata)
{
- struct xgbe_channel *channel;
unsigned int reg_val, i;
/* Enable each Rx DMA channel */
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++) {
- if (!channel->rx_ring)
+ for (i = 0; i < pdata->channel_count; i++) {
+ if (!pdata->channel[i]->rx_ring)
break;
- XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 1);
+ XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, SR, 1);
}
/* Enable each Rx queue */
@@ -3304,7 +3273,6 @@ static void xgbe_enable_rx(struct xgbe_prv_data *pdata)
static void xgbe_disable_rx(struct xgbe_prv_data *pdata)
{
- struct xgbe_channel *channel;
unsigned int i;
/* Disable MAC Rx */
@@ -3321,27 +3289,24 @@ static void xgbe_disable_rx(struct xgbe_prv_data *pdata)
XGMAC_IOWRITE(pdata, MAC_RQC0R, 0);
/* Disable each Rx DMA channel */
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++) {
- if (!channel->rx_ring)
+ for (i = 0; i < pdata->channel_count; i++) {
+ if (!pdata->channel[i]->rx_ring)
break;
- XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 0);
+ XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, SR, 0);
}
}
static void xgbe_powerup_tx(struct xgbe_prv_data *pdata)
{
- struct xgbe_channel *channel;
unsigned int i;
/* Enable each Tx DMA channel */
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++) {
- if (!channel->tx_ring)
+ for (i = 0; i < pdata->channel_count; i++) {
+ if (!pdata->channel[i]->tx_ring)
break;
- XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 1);
+ XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, ST, 1);
}
/* Enable MAC Tx */
@@ -3350,7 +3315,6 @@ static void xgbe_powerup_tx(struct xgbe_prv_data *pdata)
static void xgbe_powerdown_tx(struct xgbe_prv_data *pdata)
{
- struct xgbe_channel *channel;
unsigned int i;
/* Prepare for Tx DMA channel stop */
@@ -3361,42 +3325,37 @@ static void xgbe_powerdown_tx(struct xgbe_prv_data *pdata)
XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0);
/* Disable each Tx DMA channel */
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++) {
- if (!channel->tx_ring)
+ for (i = 0; i < pdata->channel_count; i++) {
+ if (!pdata->channel[i]->tx_ring)
break;
- XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 0);
+ XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, ST, 0);
}
}
static void xgbe_powerup_rx(struct xgbe_prv_data *pdata)
{
- struct xgbe_channel *channel;
unsigned int i;
/* Enable each Rx DMA channel */
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++) {
- if (!channel->rx_ring)
+ for (i = 0; i < pdata->channel_count; i++) {
+ if (!pdata->channel[i]->rx_ring)
break;
- XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 1);
+ XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, SR, 1);
}
}
static void xgbe_powerdown_rx(struct xgbe_prv_data *pdata)
{
- struct xgbe_channel *channel;
unsigned int i;
/* Disable each Rx DMA channel */
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++) {
- if (!channel->rx_ring)
+ for (i = 0; i < pdata->channel_count; i++) {
+ if (!pdata->channel[i]->rx_ring)
break;
- XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 0);
+ XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, SR, 0);
}
}
@@ -3420,9 +3379,7 @@ static int xgbe_init(struct xgbe_prv_data *pdata)
xgbe_config_dma_bus(pdata);
xgbe_config_dma_cache(pdata);
xgbe_config_osp_mode(pdata);
- xgbe_config_pblx8(pdata);
- xgbe_config_tx_pbl_val(pdata);
- xgbe_config_rx_pbl_val(pdata);
+ xgbe_config_pbl_val(pdata);
xgbe_config_rx_coalesce(pdata);
xgbe_config_tx_coalesce(pdata);
xgbe_config_rx_buffer_size(pdata);
@@ -3550,13 +3507,6 @@ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
/* For TX DMA Operating on Second Frame config */
hw_if->config_osp_mode = xgbe_config_osp_mode;
- /* For RX and TX PBL config */
- hw_if->config_rx_pbl_val = xgbe_config_rx_pbl_val;
- hw_if->get_rx_pbl_val = xgbe_get_rx_pbl_val;
- hw_if->config_tx_pbl_val = xgbe_config_tx_pbl_val;
- hw_if->get_tx_pbl_val = xgbe_get_tx_pbl_val;
- hw_if->config_pblx8 = xgbe_config_pblx8;
-
/* For MMC statistics support */
hw_if->tx_mmc_int = xgbe_tx_mmc_int;
hw_if->rx_mmc_int = xgbe_rx_mmc_int;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index c772420fa41c..ecef3ee87b17 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -158,81 +158,106 @@ static int xgbe_one_poll(struct napi_struct *, int);
static int xgbe_all_poll(struct napi_struct *, int);
static void xgbe_stop(struct xgbe_prv_data *);
+static void *xgbe_alloc_node(size_t size, int node)
+{
+ void *mem;
+
+ mem = kzalloc_node(size, GFP_KERNEL, node);
+ if (!mem)
+ mem = kzalloc(size, GFP_KERNEL);
+
+ return mem;
+}
+
+static void xgbe_free_channels(struct xgbe_prv_data *pdata)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(pdata->channel); i++) {
+ if (!pdata->channel[i])
+ continue;
+
+ kfree(pdata->channel[i]->rx_ring);
+ kfree(pdata->channel[i]->tx_ring);
+ kfree(pdata->channel[i]);
+
+ pdata->channel[i] = NULL;
+ }
+
+ pdata->channel_count = 0;
+}
+
static int xgbe_alloc_channels(struct xgbe_prv_data *pdata)
{
- struct xgbe_channel *channel_mem, *channel;
- struct xgbe_ring *tx_ring, *rx_ring;
+ struct xgbe_channel *channel;
+ struct xgbe_ring *ring;
unsigned int count, i;
- int ret = -ENOMEM;
+ unsigned int cpu;
+ int node;
count = max_t(unsigned int, pdata->tx_ring_count, pdata->rx_ring_count);
+ for (i = 0; i < count; i++) {
+ /* Attempt to use a CPU on the node the device is on */
+ cpu = cpumask_local_spread(i, dev_to_node(pdata->dev));
- channel_mem = kcalloc(count, sizeof(struct xgbe_channel), GFP_KERNEL);
- if (!channel_mem)
- goto err_channel;
-
- tx_ring = kcalloc(pdata->tx_ring_count, sizeof(struct xgbe_ring),
- GFP_KERNEL);
- if (!tx_ring)
- goto err_tx_ring;
+ /* Set the allocation node based on the returned CPU */
+ node = cpu_to_node(cpu);
- rx_ring = kcalloc(pdata->rx_ring_count, sizeof(struct xgbe_ring),
- GFP_KERNEL);
- if (!rx_ring)
- goto err_rx_ring;
+ channel = xgbe_alloc_node(sizeof(*channel), node);
+ if (!channel)
+ goto err_mem;
+ pdata->channel[i] = channel;
- for (i = 0, channel = channel_mem; i < count; i++, channel++) {
snprintf(channel->name, sizeof(channel->name), "channel-%u", i);
channel->pdata = pdata;
channel->queue_index = i;
channel->dma_regs = pdata->xgmac_regs + DMA_CH_BASE +
(DMA_CH_INC * i);
+ channel->node = node;
+ cpumask_set_cpu(cpu, &channel->affinity_mask);
if (pdata->per_channel_irq)
channel->dma_irq = pdata->channel_irq[i];
if (i < pdata->tx_ring_count) {
- spin_lock_init(&tx_ring->lock);
- channel->tx_ring = tx_ring++;
+ ring = xgbe_alloc_node(sizeof(*ring), node);
+ if (!ring)
+ goto err_mem;
+
+ spin_lock_init(&ring->lock);
+ ring->node = node;
+
+ channel->tx_ring = ring;
}
if (i < pdata->rx_ring_count) {
- spin_lock_init(&rx_ring->lock);
- channel->rx_ring = rx_ring++;
+ ring = xgbe_alloc_node(sizeof(*ring), node);
+ if (!ring)
+ goto err_mem;
+
+ spin_lock_init(&ring->lock);
+ ring->node = node;
+
+ channel->rx_ring = ring;
}
netif_dbg(pdata, drv, pdata->netdev,
+ "%s: cpu=%u, node=%d\n", channel->name, cpu, node);
+
+ netif_dbg(pdata, drv, pdata->netdev,
"%s: dma_regs=%p, dma_irq=%d, tx=%p, rx=%p\n",
channel->name, channel->dma_regs, channel->dma_irq,
channel->tx_ring, channel->rx_ring);
}
- pdata->channel = channel_mem;
pdata->channel_count = count;
return 0;
-err_rx_ring:
- kfree(tx_ring);
-
-err_tx_ring:
- kfree(channel_mem);
-
-err_channel:
- return ret;
-}
-
-static void xgbe_free_channels(struct xgbe_prv_data *pdata)
-{
- if (!pdata->channel)
- return;
-
- kfree(pdata->channel->rx_ring);
- kfree(pdata->channel->tx_ring);
- kfree(pdata->channel);
+err_mem:
+ xgbe_free_channels(pdata);
- pdata->channel = NULL;
- pdata->channel_count = 0;
+ return -ENOMEM;
}
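xgbe_alloc_channels() now spreads channels across CPUs with cpumask_local_spread(), preferring CPUs on the device's own node, and records both the chosen CPU (later used as the IRQ affinity hint) and its node (used for every allocation tied to the channel). A condensed sketch of the CPU/node selection, assuming the same kernel APIs:

#include <linux/cpumask.h>
#include <linux/topology.h>

/* Pick a CPU for queue index i, preferring the device's NUMA node,
 * and derive the allocation node from it.  dev_node comes from
 * dev_to_node(dev); NUMA_NO_NODE means "no preference".
 */
static void pick_queue_home(unsigned int i, int dev_node,
			    unsigned int *cpu, int *node)
{
	*cpu = cpumask_local_spread(i, dev_node);
	*node = cpu_to_node(*cpu);
}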
static inline unsigned int xgbe_tx_avail_desc(struct xgbe_ring *ring)
@@ -301,12 +326,10 @@ static void xgbe_enable_rx_tx_int(struct xgbe_prv_data *pdata,
static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata)
{
- struct xgbe_channel *channel;
unsigned int i;
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++)
- xgbe_enable_rx_tx_int(pdata, channel);
+ for (i = 0; i < pdata->channel_count; i++)
+ xgbe_enable_rx_tx_int(pdata, pdata->channel[i]);
}
static void xgbe_disable_rx_tx_int(struct xgbe_prv_data *pdata,
@@ -329,12 +352,10 @@ static void xgbe_disable_rx_tx_int(struct xgbe_prv_data *pdata,
static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata)
{
- struct xgbe_channel *channel;
unsigned int i;
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++)
- xgbe_disable_rx_tx_int(pdata, channel);
+ for (i = 0; i < pdata->channel_count; i++)
+ xgbe_disable_rx_tx_int(pdata, pdata->channel[i]);
}
static bool xgbe_ecc_sec(struct xgbe_prv_data *pdata, unsigned long *period,
@@ -382,9 +403,9 @@ static bool xgbe_ecc_ded(struct xgbe_prv_data *pdata, unsigned long *period,
return false;
}
-static irqreturn_t xgbe_ecc_isr(int irq, void *data)
+static void xgbe_ecc_isr_task(unsigned long data)
{
- struct xgbe_prv_data *pdata = data;
+ struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)data;
unsigned int ecc_isr;
bool stop = false;
@@ -435,12 +456,26 @@ out:
/* Clear all ECC interrupts */
XP_IOWRITE(pdata, XP_ECC_ISR, ecc_isr);
- return IRQ_HANDLED;
+ /* Reissue interrupt if status is not clear */
+ if (pdata->vdata->irq_reissue_support)
+ XP_IOWRITE(pdata, XP_INT_REISSUE_EN, 1 << 1);
}
-static irqreturn_t xgbe_isr(int irq, void *data)
+static irqreturn_t xgbe_ecc_isr(int irq, void *data)
{
struct xgbe_prv_data *pdata = data;
+
+ if (pdata->isr_as_tasklet)
+ tasklet_schedule(&pdata->tasklet_ecc);
+ else
+ xgbe_ecc_isr_task((unsigned long)pdata);
+
+ return IRQ_HANDLED;
+}
+
+static void xgbe_isr_task(unsigned long data)
+{
+ struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)data;
struct xgbe_hw_if *hw_if = &pdata->hw_if;
struct xgbe_channel *channel;
unsigned int dma_isr, dma_ch_isr;
@@ -461,7 +496,7 @@ static irqreturn_t xgbe_isr(int irq, void *data)
if (!(dma_isr & (1 << i)))
continue;
- channel = pdata->channel + i;
+ channel = pdata->channel[i];
dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR);
netif_dbg(pdata, intr, pdata->netdev, "DMA_CH%u_ISR=%#010x\n",
@@ -543,15 +578,36 @@ static irqreturn_t xgbe_isr(int irq, void *data)
isr_done:
/* If there is not a separate AN irq, handle it here */
if (pdata->dev_irq == pdata->an_irq)
- pdata->phy_if.an_isr(irq, pdata);
+ pdata->phy_if.an_isr(pdata);
/* If there is not a separate ECC irq, handle it here */
if (pdata->vdata->ecc_support && (pdata->dev_irq == pdata->ecc_irq))
- xgbe_ecc_isr(irq, pdata);
+ xgbe_ecc_isr_task((unsigned long)pdata);
/* If there is not a separate I2C irq, handle it here */
if (pdata->vdata->i2c_support && (pdata->dev_irq == pdata->i2c_irq))
- pdata->i2c_if.i2c_isr(irq, pdata);
+ pdata->i2c_if.i2c_isr(pdata);
+
+ /* Reissue interrupt if status is not clear */
+ if (pdata->vdata->irq_reissue_support) {
+ unsigned int reissue_mask;
+
+ reissue_mask = 1 << 0;
+ if (!pdata->per_channel_irq)
+ reissue_mask |= 0xffff << 4;
+
+ XP_IOWRITE(pdata, XP_INT_REISSUE_EN, reissue_mask);
+ }
+}
+
+static irqreturn_t xgbe_isr(int irq, void *data)
+{
+ struct xgbe_prv_data *pdata = data;
+
+ if (pdata->isr_as_tasklet)
+ tasklet_schedule(&pdata->tasklet_dev);
+ else
+ xgbe_isr_task((unsigned long)pdata);
return IRQ_HANDLED;
}
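The interrupt handlers are split into a hard-IRQ stub and a tasklet body: when isr_as_tasklet is set, the register walk is deferred to softirq context; otherwise the stub calls the body directly and behavior is unchanged. A minimal sketch of the split using the same tasklet API (my_dev and my_handle_events are illustrative stand-ins):

#include <linux/interrupt.h>

struct my_dev {				/* illustrative only */
	struct tasklet_struct tasklet;
	bool isr_as_tasklet;
};

static void my_handle_events(struct my_dev *dev)
{
	/* stand-in for the real register walk */
}

/* Body: runs either inline from hard-IRQ context or as a tasklet. */
static void my_isr_task(unsigned long data)
{
	struct my_dev *dev = (struct my_dev *)data;

	my_handle_events(dev);
}

/* Stub registered with request_irq(); it only decides where the body runs. */
static irqreturn_t my_isr(int irq, void *data)
{
	struct my_dev *dev = data;

	if (dev->isr_as_tasklet)
		tasklet_schedule(&dev->tasklet);	/* defer to softirq */
	else
		my_isr_task((unsigned long)dev);	/* run inline */

	return IRQ_HANDLED;
}

As in xgbe_request_irqs() below, the tasklet must be initialized with tasklet_init(&dev->tasklet, my_isr_task, (unsigned long)dev) before the IRQ is requested.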
@@ -640,8 +696,8 @@ static void xgbe_init_timers(struct xgbe_prv_data *pdata)
setup_timer(&pdata->service_timer, xgbe_service_timer,
(unsigned long)pdata);
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++) {
+ for (i = 0; i < pdata->channel_count; i++) {
+ channel = pdata->channel[i];
if (!channel->tx_ring)
break;
@@ -662,8 +718,8 @@ static void xgbe_stop_timers(struct xgbe_prv_data *pdata)
del_timer_sync(&pdata->service_timer);
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++) {
+ for (i = 0; i < pdata->channel_count; i++) {
+ channel = pdata->channel[i];
if (!channel->tx_ring)
break;
@@ -781,8 +837,8 @@ static void xgbe_napi_enable(struct xgbe_prv_data *pdata, unsigned int add)
unsigned int i;
if (pdata->per_channel_irq) {
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++) {
+ for (i = 0; i < pdata->channel_count; i++) {
+ channel = pdata->channel[i];
if (add)
netif_napi_add(pdata->netdev, &channel->napi,
xgbe_one_poll, NAPI_POLL_WEIGHT);
@@ -804,8 +860,8 @@ static void xgbe_napi_disable(struct xgbe_prv_data *pdata, unsigned int del)
unsigned int i;
if (pdata->per_channel_irq) {
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++) {
+ for (i = 0; i < pdata->channel_count; i++) {
+ channel = pdata->channel[i];
napi_disable(&channel->napi);
if (del)
@@ -826,6 +882,10 @@ static int xgbe_request_irqs(struct xgbe_prv_data *pdata)
unsigned int i;
int ret;
+ tasklet_init(&pdata->tasklet_dev, xgbe_isr_task, (unsigned long)pdata);
+ tasklet_init(&pdata->tasklet_ecc, xgbe_ecc_isr_task,
+ (unsigned long)pdata);
+
ret = devm_request_irq(pdata->dev, pdata->dev_irq, xgbe_isr, 0,
netdev->name, pdata);
if (ret) {
@@ -847,8 +907,8 @@ static int xgbe_request_irqs(struct xgbe_prv_data *pdata)
if (!pdata->per_channel_irq)
return 0;
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++) {
+ for (i = 0; i < pdata->channel_count; i++) {
+ channel = pdata->channel[i];
snprintf(channel->dma_irq_name,
sizeof(channel->dma_irq_name) - 1,
"%s-TxRx-%u", netdev_name(netdev),
@@ -862,14 +922,21 @@ static int xgbe_request_irqs(struct xgbe_prv_data *pdata)
channel->dma_irq);
goto err_dma_irq;
}
+
+ irq_set_affinity_hint(channel->dma_irq,
+ &channel->affinity_mask);
}
return 0;
err_dma_irq:
/* Using an unsigned int, 'i' will go to UINT_MAX and exit */
- for (i--, channel--; i < pdata->channel_count; i--, channel--)
+ for (i--; i < pdata->channel_count; i--) {
+ channel = pdata->channel[i];
+
+ irq_set_affinity_hint(channel->dma_irq, NULL);
devm_free_irq(pdata->dev, channel->dma_irq, channel);
+ }
if (pdata->vdata->ecc_support && (pdata->dev_irq != pdata->ecc_irq))
devm_free_irq(pdata->dev, pdata->ecc_irq, pdata);
@@ -893,9 +960,12 @@ static void xgbe_free_irqs(struct xgbe_prv_data *pdata)
if (!pdata->per_channel_irq)
return;
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++)
+ for (i = 0; i < pdata->channel_count; i++) {
+ channel = pdata->channel[i];
+
+ irq_set_affinity_hint(channel->dma_irq, NULL);
devm_free_irq(pdata->dev, channel->dma_irq, channel);
+ }
}
void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
@@ -930,16 +1000,14 @@ void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
static void xgbe_free_tx_data(struct xgbe_prv_data *pdata)
{
struct xgbe_desc_if *desc_if = &pdata->desc_if;
- struct xgbe_channel *channel;
struct xgbe_ring *ring;
struct xgbe_ring_data *rdata;
unsigned int i, j;
DBGPR("-->xgbe_free_tx_data\n");
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++) {
- ring = channel->tx_ring;
+ for (i = 0; i < pdata->channel_count; i++) {
+ ring = pdata->channel[i]->tx_ring;
if (!ring)
break;
@@ -955,16 +1023,14 @@ static void xgbe_free_tx_data(struct xgbe_prv_data *pdata)
static void xgbe_free_rx_data(struct xgbe_prv_data *pdata)
{
struct xgbe_desc_if *desc_if = &pdata->desc_if;
- struct xgbe_channel *channel;
struct xgbe_ring *ring;
struct xgbe_ring_data *rdata;
unsigned int i, j;
DBGPR("-->xgbe_free_rx_data\n");
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++) {
- ring = channel->rx_ring;
+ for (i = 0; i < pdata->channel_count; i++) {
+ ring = pdata->channel[i]->rx_ring;
if (!ring)
break;
@@ -1140,8 +1206,8 @@ static void xgbe_stop(struct xgbe_prv_data *pdata)
hw_if->exit(pdata);
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++) {
+ for (i = 0; i < pdata->channel_count; i++) {
+ channel = pdata->channel[i];
if (!channel->tx_ring)
continue;
@@ -1212,6 +1278,10 @@ static void xgbe_tx_tstamp(struct work_struct *work)
u64 nsec;
unsigned long flags;
+ spin_lock_irqsave(&pdata->tstamp_lock, flags);
+ if (!pdata->tx_tstamp_skb)
+ goto unlock;
+
if (pdata->tx_tstamp) {
nsec = timecounter_cyc2time(&pdata->tstamp_tc,
pdata->tx_tstamp);
@@ -1223,8 +1293,9 @@ static void xgbe_tx_tstamp(struct work_struct *work)
dev_kfree_skb_any(pdata->tx_tstamp_skb);
- spin_lock_irqsave(&pdata->tstamp_lock, flags);
pdata->tx_tstamp_skb = NULL;
+
+unlock:
spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
}
@@ -1268,6 +1339,7 @@ static int xgbe_set_hwtstamp_settings(struct xgbe_prv_data *pdata,
case HWTSTAMP_FILTER_NONE:
break;
+ case HWTSTAMP_FILTER_NTP_ALL:
case HWTSTAMP_FILTER_ALL:
XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENALL, 1);
XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
@@ -1390,8 +1462,7 @@ static void xgbe_prep_tx_tstamp(struct xgbe_prv_data *pdata,
spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
}
- if (!XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP))
- skb_tx_timestamp(skb);
+ skb_tx_timestamp(skb);
}
static void xgbe_prep_vlan(struct sk_buff *skb, struct xgbe_packet_data *packet)
@@ -1623,7 +1694,7 @@ static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
DBGPR("-->xgbe_xmit: skb->len = %d\n", skb->len);
- channel = pdata->channel + skb->queue_mapping;
+ channel = pdata->channel[skb->queue_mapping];
txq = netdev_get_tx_queue(netdev, channel->queue_index);
ring = channel->tx_ring;
packet = &ring->packet_data;
@@ -1833,9 +1904,10 @@ static void xgbe_poll_controller(struct net_device *netdev)
DBGPR("-->xgbe_poll_controller\n");
if (pdata->per_channel_irq) {
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++)
+ for (i = 0; i < pdata->channel_count; i++) {
+ channel = pdata->channel[i];
xgbe_dma_isr(channel->dma_irq, channel);
+ }
} else {
disable_irq(pdata->dev_irq);
xgbe_isr(pdata->dev_irq, pdata);
@@ -1846,7 +1918,8 @@ static void xgbe_poll_controller(struct net_device *netdev)
}
#endif /* End CONFIG_NET_POLL_CONTROLLER */
-static int xgbe_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
+static int xgbe_setup_tc(struct net_device *netdev, u32 handle, u32 chain_index,
+ __be16 proto,
struct tc_to_netdev *tc_to_netdev)
{
struct xgbe_prv_data *pdata = netdev_priv(netdev);
@@ -2327,8 +2400,9 @@ static int xgbe_all_poll(struct napi_struct *napi, int budget)
do {
last_processed = processed;
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++) {
+ for (i = 0; i < pdata->channel_count; i++) {
+ channel = pdata->channel[i];
+
/* Cleanup Tx ring first */
xgbe_tx_poll(channel);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
index 920566a3a599..67a2e52ad25d 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
@@ -247,7 +247,7 @@ static int xgbe_set_pauseparam(struct net_device *netdev,
if (pause->autoneg && (pdata->phy.autoneg != AUTONEG_ENABLE)) {
netdev_err(netdev,
- "autoneg disabled, pause autoneg not avialable\n");
+ "autoneg disabled, pause autoneg not available\n");
return -EINVAL;
}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c b/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c
index 417bdb5982a9..4d9062d35930 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c
@@ -274,13 +274,16 @@ static void xgbe_i2c_clear_isr_interrupts(struct xgbe_prv_data *pdata,
XI2C_IOREAD(pdata, IC_CLR_STOP_DET);
}
-static irqreturn_t xgbe_i2c_isr(int irq, void *data)
+static void xgbe_i2c_isr_task(unsigned long data)
{
struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)data;
struct xgbe_i2c_op_state *state = &pdata->i2c.op_state;
unsigned int isr;
isr = XI2C_IOREAD(pdata, IC_RAW_INTR_STAT);
+ if (!isr)
+ goto reissue_check;
+
netif_dbg(pdata, intr, pdata->netdev,
"I2C interrupt received: status=%#010x\n", isr);
@@ -308,6 +311,21 @@ out:
if (state->ret || XI2C_GET_BITS(isr, IC_RAW_INTR_STAT, STOP_DET))
complete(&pdata->i2c_complete);
+reissue_check:
+ /* Reissue interrupt if status is not clear */
+ if (pdata->vdata->irq_reissue_support)
+ XP_IOWRITE(pdata, XP_INT_REISSUE_EN, 1 << 2);
+}
+
+static irqreturn_t xgbe_i2c_isr(int irq, void *data)
+{
+ struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)data;
+
+ if (pdata->isr_as_tasklet)
+ tasklet_schedule(&pdata->tasklet_i2c);
+ else
+ xgbe_i2c_isr_task((unsigned long)pdata);
+
return IRQ_HANDLED;
}
@@ -349,12 +367,11 @@ static void xgbe_i2c_set_target(struct xgbe_prv_data *pdata, unsigned int addr)
XI2C_IOWRITE(pdata, IC_TAR, addr);
}
-static irqreturn_t xgbe_i2c_combined_isr(int irq, struct xgbe_prv_data *pdata)
+static irqreturn_t xgbe_i2c_combined_isr(struct xgbe_prv_data *pdata)
{
- if (!XI2C_IOREAD(pdata, IC_RAW_INTR_STAT))
- return IRQ_HANDLED;
+ xgbe_i2c_isr_task((unsigned long)pdata);
- return xgbe_i2c_isr(irq, pdata);
+ return IRQ_HANDLED;
}
static int xgbe_i2c_xfer(struct xgbe_prv_data *pdata, struct xgbe_i2c_op *op)
@@ -445,6 +462,9 @@ static int xgbe_i2c_start(struct xgbe_prv_data *pdata)
/* If we have a separate I2C irq, enable it */
if (pdata->dev_irq != pdata->i2c_irq) {
+ tasklet_init(&pdata->tasklet_i2c, xgbe_i2c_isr_task,
+ (unsigned long)pdata);
+
ret = devm_request_irq(pdata->dev, pdata->i2c_irq,
xgbe_i2c_isr, 0, pdata->i2c_name,
pdata);
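
[ Note: every interrupt source converted in this patch (device, ECC, I2C, AN) follows
  the same split: the hard handler either schedules a tasklet, when isr_as_tasklet is
  set, or runs the task function inline. The flag is set in the xgbe-pci.c hunks below
  when MSI/MSI-X vectors are in use, presumably because message-signaled interrupts are
  edge-like and cannot rely on level re-assertion. A condensed sketch of the shape,
  using the classic unsigned-long tasklet API that the patch itself uses; foo_* names
  are hypothetical:

	#include <linux/interrupt.h>

	static void foo_isr_task(unsigned long data)
	{
		struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)data;

		/* read/clear device status here, complete any waiters */
	}

	static irqreturn_t foo_isr(int irq, void *data)
	{
		struct xgbe_prv_data *pdata = data;

		if (pdata->isr_as_tasklet)
			tasklet_schedule(&pdata->tasklet_i2c);	/* defer to softirq */
		else
			foo_isr_task((unsigned long)pdata);	/* run inline */

		return IRQ_HANDLED;
	} ]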
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
index 17ac8f9a51a0..500147d9e3c8 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
@@ -140,14 +140,16 @@ static void xgbe_default_config(struct xgbe_prv_data *pdata)
{
DBGPR("-->xgbe_default_config\n");
- pdata->pblx8 = DMA_PBL_X8_ENABLE;
+ pdata->blen = DMA_SBMR_BLEN_64;
+ pdata->pbl = DMA_PBL_128;
+ pdata->aal = 1;
+ pdata->rd_osr_limit = 8;
+ pdata->wr_osr_limit = 8;
pdata->tx_sf_mode = MTL_TSF_ENABLE;
pdata->tx_threshold = MTL_TX_THRESHOLD_64;
- pdata->tx_pbl = DMA_PBL_16;
pdata->tx_osp_mode = DMA_OSP_ENABLE;
pdata->rx_sf_mode = MTL_RSF_DISABLE;
pdata->rx_threshold = MTL_RX_THRESHOLD_64;
- pdata->rx_pbl = DMA_PBL_16;
pdata->pause_autoneg = 1;
pdata->tx_pause = 1;
pdata->rx_pause = 1;
@@ -277,7 +279,11 @@ int xgbe_config_netdev(struct xgbe_prv_data *pdata)
pdata->desc_ded_period = jiffies;
/* Issue software reset to device */
- pdata->hw_if.exit(pdata);
+ ret = pdata->hw_if.exit(pdata);
+ if (ret) {
+ dev_err(dev, "software reset failed\n");
+ return ret;
+ }
/* Set default configuration data */
xgbe_default_config(pdata);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
index b672d9249539..80684914dd8a 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
@@ -665,6 +665,10 @@ static void xgbe_an37_isr(struct xgbe_prv_data *pdata)
} else {
/* Enable AN interrupts */
xgbe_an37_enable_interrupts(pdata);
+
+ /* Reissue interrupt if status is not clear */
+ if (pdata->vdata->irq_reissue_support)
+ XP_IOWRITE(pdata, XP_INT_REISSUE_EN, 1 << 3);
}
}
@@ -684,10 +688,14 @@ static void xgbe_an73_isr(struct xgbe_prv_data *pdata)
} else {
/* Enable AN interrupts */
xgbe_an73_enable_interrupts(pdata);
+
+ /* Reissue interrupt if status is not clear */
+ if (pdata->vdata->irq_reissue_support)
+ XP_IOWRITE(pdata, XP_INT_REISSUE_EN, 1 << 3);
}
}
-static irqreturn_t xgbe_an_isr(int irq, void *data)
+static void xgbe_an_isr_task(unsigned long data)
{
struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)data;
@@ -705,13 +713,25 @@ static irqreturn_t xgbe_an_isr(int irq, void *data)
default:
break;
}
+}
+
+static irqreturn_t xgbe_an_isr(int irq, void *data)
+{
+ struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)data;
+
+ if (pdata->isr_as_tasklet)
+ tasklet_schedule(&pdata->tasklet_an);
+ else
+ xgbe_an_isr_task((unsigned long)pdata);
return IRQ_HANDLED;
}
-static irqreturn_t xgbe_an_combined_isr(int irq, struct xgbe_prv_data *pdata)
+static irqreturn_t xgbe_an_combined_isr(struct xgbe_prv_data *pdata)
{
- return xgbe_an_isr(irq, pdata);
+ xgbe_an_isr_task((unsigned long)pdata);
+
+ return IRQ_HANDLED;
}
static void xgbe_an_irq_work(struct work_struct *work)
@@ -915,6 +935,10 @@ static void xgbe_an_state_machine(struct work_struct *work)
break;
}
+ /* Reissue interrupt if status is not clear */
+ if (pdata->vdata->irq_reissue_support)
+ XP_IOWRITE(pdata, XP_INT_REISSUE_EN, 1 << 3);
+
mutex_unlock(&pdata->an_mutex);
}
@@ -1379,6 +1403,9 @@ static int xgbe_phy_start(struct xgbe_prv_data *pdata)
/* If we have a separate AN irq, enable it */
if (pdata->dev_irq != pdata->an_irq) {
+ tasklet_init(&pdata->tasklet_an, xgbe_an_isr_task,
+ (unsigned long)pdata);
+
ret = devm_request_irq(pdata->dev, pdata->an_irq,
xgbe_an_isr, 0, pdata->an_name,
pdata);
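
[ Note: the XP_INT_REISSUE_EN writes added through the I2C and AN paths are the
  companion to the tasklet conversion: once handling is deferred, status can latch
  while the tasklet runs, so hardware that advertises irq_reissue_support gets the
  source explicitly re-armed after servicing. A sketch of a helper this could be
  factored into; the bit assignments are taken from this patch itself (bit 2 for
  I2C, bit 3 for AN), not from a datasheet:

	#include <linux/bitops.h>

	#define FOO_INT_REISSUE_I2C	BIT(2)
	#define FOO_INT_REISSUE_AN	BIT(3)

	static void foo_reissue_irq(struct xgbe_prv_data *pdata,
				    unsigned int source)
	{
		/* Reissue interrupt if status is not clear */
		if (pdata->vdata->irq_reissue_support)
			XP_IOWRITE(pdata, XP_INT_REISSUE_EN, source);
	} ]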
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
index 38392a520725..1e56ad7bd9a5 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
@@ -139,6 +139,7 @@ static int xgbe_config_multi_msi(struct xgbe_prv_data *pdata)
return ret;
}
+ pdata->isr_as_tasklet = 1;
pdata->irq_count = ret;
pdata->dev_irq = pci_irq_vector(pdata->pcidev, 0);
@@ -175,6 +176,7 @@ static int xgbe_config_irqs(struct xgbe_prv_data *pdata)
return ret;
}
+ pdata->isr_as_tasklet = pdata->pcidev->msi_enabled ? 1 : 0;
pdata->irq_count = 1;
pdata->channel_irq_count = 1;
@@ -325,9 +327,9 @@ static int xgbe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* Set the DMA coherency values */
pdata->coherent = 1;
- pdata->axdomain = XGBE_DMA_OS_AXDOMAIN;
- pdata->arcache = XGBE_DMA_OS_ARCACHE;
- pdata->awcache = XGBE_DMA_OS_AWCACHE;
+ pdata->arcr = XGBE_DMA_PCI_ARCR;
+ pdata->awcr = XGBE_DMA_PCI_AWCR;
+ pdata->awarcr = XGBE_DMA_PCI_AWARCR;
/* Set the maximum channels and queues */
reg = XP_IOREAD(pdata, XP_PROP_1);
@@ -445,6 +447,9 @@ static const struct xgbe_version_data xgbe_v2a = {
.tx_tstamp_workaround = 1,
.ecc_support = 1,
.i2c_support = 1,
+ .irq_reissue_support = 1,
+ .tx_desc_prefetch = 5,
+ .rx_desc_prefetch = 5,
};
static const struct xgbe_version_data xgbe_v2b = {
@@ -456,6 +461,9 @@ static const struct xgbe_version_data xgbe_v2b = {
.tx_tstamp_workaround = 1,
.ecc_support = 1,
.i2c_support = 1,
+ .irq_reissue_support = 1,
+ .tx_desc_prefetch = 5,
+ .rx_desc_prefetch = 5,
};
static const struct pci_device_id xgbe_pci_table[] = {
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
index e707c49cc55a..04b5c149caca 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
@@ -711,23 +711,39 @@ static void xgbe_phy_sfp_phy_settings(struct xgbe_prv_data *pdata)
{
struct xgbe_phy_data *phy_data = pdata->phy_data;
+ if (!phy_data->sfp_mod_absent && !phy_data->sfp_changed)
+ return;
+
+ pdata->phy.supported &= ~SUPPORTED_Autoneg;
+ pdata->phy.supported &= ~(SUPPORTED_Pause | SUPPORTED_Asym_Pause);
+ pdata->phy.supported &= ~SUPPORTED_TP;
+ pdata->phy.supported &= ~SUPPORTED_FIBRE;
+ pdata->phy.supported &= ~SUPPORTED_100baseT_Full;
+ pdata->phy.supported &= ~SUPPORTED_1000baseT_Full;
+ pdata->phy.supported &= ~SUPPORTED_10000baseT_Full;
+
if (phy_data->sfp_mod_absent) {
pdata->phy.speed = SPEED_UNKNOWN;
pdata->phy.duplex = DUPLEX_UNKNOWN;
pdata->phy.autoneg = AUTONEG_ENABLE;
+ pdata->phy.pause_autoneg = AUTONEG_ENABLE;
+
+ pdata->phy.supported |= SUPPORTED_Autoneg;
+ pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+ pdata->phy.supported |= SUPPORTED_TP;
+ pdata->phy.supported |= SUPPORTED_FIBRE;
+ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100)
+ pdata->phy.supported |= SUPPORTED_100baseT_Full;
+ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000)
+ pdata->phy.supported |= SUPPORTED_1000baseT_Full;
+ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000)
+ pdata->phy.supported |= SUPPORTED_10000baseT_Full;
+
pdata->phy.advertising = pdata->phy.supported;
return;
}
- pdata->phy.advertising &= ~ADVERTISED_Autoneg;
- pdata->phy.advertising &= ~ADVERTISED_TP;
- pdata->phy.advertising &= ~ADVERTISED_FIBRE;
- pdata->phy.advertising &= ~ADVERTISED_100baseT_Full;
- pdata->phy.advertising &= ~ADVERTISED_1000baseT_Full;
- pdata->phy.advertising &= ~ADVERTISED_10000baseT_Full;
- pdata->phy.advertising &= ~ADVERTISED_10000baseR_FEC;
-
switch (phy_data->sfp_base) {
case XGBE_SFP_BASE_1000_T:
case XGBE_SFP_BASE_1000_SX:
@@ -736,17 +752,25 @@ static void xgbe_phy_sfp_phy_settings(struct xgbe_prv_data *pdata)
pdata->phy.speed = SPEED_UNKNOWN;
pdata->phy.duplex = DUPLEX_UNKNOWN;
pdata->phy.autoneg = AUTONEG_ENABLE;
- pdata->phy.advertising |= ADVERTISED_Autoneg;
+ pdata->phy.pause_autoneg = AUTONEG_ENABLE;
+ pdata->phy.supported |= SUPPORTED_Autoneg;
+ pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
break;
case XGBE_SFP_BASE_10000_SR:
case XGBE_SFP_BASE_10000_LR:
case XGBE_SFP_BASE_10000_LRM:
case XGBE_SFP_BASE_10000_ER:
case XGBE_SFP_BASE_10000_CR:
- default:
pdata->phy.speed = SPEED_10000;
pdata->phy.duplex = DUPLEX_FULL;
pdata->phy.autoneg = AUTONEG_DISABLE;
+ pdata->phy.pause_autoneg = AUTONEG_DISABLE;
+ break;
+ default:
+ pdata->phy.speed = SPEED_UNKNOWN;
+ pdata->phy.duplex = DUPLEX_UNKNOWN;
+ pdata->phy.autoneg = AUTONEG_DISABLE;
+ pdata->phy.pause_autoneg = AUTONEG_DISABLE;
break;
}
@@ -754,36 +778,38 @@ static void xgbe_phy_sfp_phy_settings(struct xgbe_prv_data *pdata)
case XGBE_SFP_BASE_1000_T:
case XGBE_SFP_BASE_1000_CX:
case XGBE_SFP_BASE_10000_CR:
- pdata->phy.advertising |= ADVERTISED_TP;
+ pdata->phy.supported |= SUPPORTED_TP;
break;
default:
- pdata->phy.advertising |= ADVERTISED_FIBRE;
+ pdata->phy.supported |= SUPPORTED_FIBRE;
}
switch (phy_data->sfp_speed) {
case XGBE_SFP_SPEED_100_1000:
if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100)
- pdata->phy.advertising |= ADVERTISED_100baseT_Full;
+ pdata->phy.supported |= SUPPORTED_100baseT_Full;
if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000)
- pdata->phy.advertising |= ADVERTISED_1000baseT_Full;
+ pdata->phy.supported |= SUPPORTED_1000baseT_Full;
break;
case XGBE_SFP_SPEED_1000:
if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000)
- pdata->phy.advertising |= ADVERTISED_1000baseT_Full;
+ pdata->phy.supported |= SUPPORTED_1000baseT_Full;
break;
case XGBE_SFP_SPEED_10000:
if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000)
- pdata->phy.advertising |= ADVERTISED_10000baseT_Full;
+ pdata->phy.supported |= SUPPORTED_10000baseT_Full;
break;
default:
/* Choose the fastest supported speed */
if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000)
- pdata->phy.advertising |= ADVERTISED_10000baseT_Full;
+ pdata->phy.supported |= SUPPORTED_10000baseT_Full;
else if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000)
- pdata->phy.advertising |= ADVERTISED_1000baseT_Full;
+ pdata->phy.supported |= SUPPORTED_1000baseT_Full;
else if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100)
- pdata->phy.advertising |= ADVERTISED_100baseT_Full;
+ pdata->phy.supported |= SUPPORTED_100baseT_Full;
}
+
+ pdata->phy.advertising = pdata->phy.supported;
}
static bool xgbe_phy_sfp_bit_rate(struct xgbe_sfp_eeprom *sfp_eeprom,
@@ -1095,7 +1121,8 @@ static int xgbe_phy_sfp_read_eeprom(struct xgbe_prv_data *pdata)
ret = xgbe_phy_sfp_get_mux(pdata);
if (ret) {
- netdev_err(pdata->netdev, "I2C error setting SFP MUX\n");
+ dev_err_once(pdata->dev, "%s: I2C error setting SFP MUX\n",
+ netdev_name(pdata->netdev));
return ret;
}
@@ -1105,7 +1132,8 @@ static int xgbe_phy_sfp_read_eeprom(struct xgbe_prv_data *pdata)
&eeprom_addr, sizeof(eeprom_addr),
&sfp_eeprom, sizeof(sfp_eeprom));
if (ret) {
- netdev_err(pdata->netdev, "I2C error reading SFP EEPROM\n");
+ dev_err_once(pdata->dev, "%s: I2C error reading SFP EEPROM\n",
+ netdev_name(pdata->netdev));
goto put;
}
@@ -1164,7 +1192,8 @@ static void xgbe_phy_sfp_signals(struct xgbe_prv_data *pdata)
&gpio_reg, sizeof(gpio_reg),
gpio_ports, sizeof(gpio_ports));
if (ret) {
- netdev_err(pdata->netdev, "I2C error reading SFP GPIOs\n");
+ dev_err_once(pdata->dev, "%s: I2C error reading SFP GPIOs\n",
+ netdev_name(pdata->netdev));
return;
}
@@ -1694,19 +1723,25 @@ static void xgbe_phy_set_redrv_mode(struct xgbe_prv_data *pdata)
xgbe_phy_put_comm_ownership(pdata);
}
-static void xgbe_phy_start_ratechange(struct xgbe_prv_data *pdata)
+static void xgbe_phy_perform_ratechange(struct xgbe_prv_data *pdata,
+ unsigned int cmd, unsigned int sub_cmd)
{
- if (!XP_IOREAD_BITS(pdata, XP_DRIVER_INT_RO, STATUS))
- return;
+ unsigned int s0 = 0;
+ unsigned int wait;
/* Log if a previous command did not complete */
- netif_dbg(pdata, link, pdata->netdev,
- "firmware mailbox not ready for command\n");
-}
+ if (XP_IOREAD_BITS(pdata, XP_DRIVER_INT_RO, STATUS))
+ netif_dbg(pdata, link, pdata->netdev,
+ "firmware mailbox not ready for command\n");
-static void xgbe_phy_complete_ratechange(struct xgbe_prv_data *pdata)
-{
- unsigned int wait;
+ /* Construct the command */
+ XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, COMMAND, cmd);
+ XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, sub_cmd);
+
+ /* Issue the command */
+ XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_0, s0);
+ XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_1, 0);
+ XP_IOWRITE_BITS(pdata, XP_DRIVER_INT_REQ, REQUEST, 1);
/* Wait for command to complete */
wait = XGBE_RATECHANGE_COUNT;
@@ -1723,21 +1758,8 @@ static void xgbe_phy_complete_ratechange(struct xgbe_prv_data *pdata)
static void xgbe_phy_rrc(struct xgbe_prv_data *pdata)
{
- unsigned int s0;
-
- xgbe_phy_start_ratechange(pdata);
-
/* Receiver Reset Cycle */
- s0 = 0;
- XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, COMMAND, 5);
- XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, 0);
-
- /* Call FW to make the change */
- XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_0, s0);
- XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_1, 0);
- XP_IOWRITE_BITS(pdata, XP_DRIVER_INT_REQ, REQUEST, 1);
-
- xgbe_phy_complete_ratechange(pdata);
+ xgbe_phy_perform_ratechange(pdata, 5, 0);
netif_dbg(pdata, link, pdata->netdev, "receiver reset complete\n");
}
@@ -1746,14 +1768,8 @@ static void xgbe_phy_power_off(struct xgbe_prv_data *pdata)
{
struct xgbe_phy_data *phy_data = pdata->phy_data;
- xgbe_phy_start_ratechange(pdata);
-
- /* Call FW to make the change */
- XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_0, 0);
- XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_1, 0);
- XP_IOWRITE_BITS(pdata, XP_DRIVER_INT_REQ, REQUEST, 1);
-
- xgbe_phy_complete_ratechange(pdata);
+ /* Power off */
+ xgbe_phy_perform_ratechange(pdata, 0, 0);
phy_data->cur_mode = XGBE_MODE_UNKNOWN;
@@ -1763,33 +1779,21 @@ static void xgbe_phy_power_off(struct xgbe_prv_data *pdata)
static void xgbe_phy_sfi_mode(struct xgbe_prv_data *pdata)
{
struct xgbe_phy_data *phy_data = pdata->phy_data;
- unsigned int s0;
xgbe_phy_set_redrv_mode(pdata);
- xgbe_phy_start_ratechange(pdata);
-
/* 10G/SFI */
- s0 = 0;
- XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, COMMAND, 3);
if (phy_data->sfp_cable != XGBE_SFP_CABLE_PASSIVE) {
- XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, 0);
+ xgbe_phy_perform_ratechange(pdata, 3, 0);
} else {
if (phy_data->sfp_cable_len <= 1)
- XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, 1);
+ xgbe_phy_perform_ratechange(pdata, 3, 1);
else if (phy_data->sfp_cable_len <= 3)
- XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, 2);
+ xgbe_phy_perform_ratechange(pdata, 3, 2);
else
- XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, 3);
+ xgbe_phy_perform_ratechange(pdata, 3, 3);
}
- /* Call FW to make the change */
- XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_0, s0);
- XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_1, 0);
- XP_IOWRITE_BITS(pdata, XP_DRIVER_INT_REQ, REQUEST, 1);
-
- xgbe_phy_complete_ratechange(pdata);
-
phy_data->cur_mode = XGBE_MODE_SFI;
netif_dbg(pdata, link, pdata->netdev, "10GbE SFI mode set\n");
@@ -1798,23 +1802,11 @@ static void xgbe_phy_sfi_mode(struct xgbe_prv_data *pdata)
static void xgbe_phy_x_mode(struct xgbe_prv_data *pdata)
{
struct xgbe_phy_data *phy_data = pdata->phy_data;
- unsigned int s0;
xgbe_phy_set_redrv_mode(pdata);
- xgbe_phy_start_ratechange(pdata);
-
/* 1G/X */
- s0 = 0;
- XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, COMMAND, 1);
- XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, 3);
-
- /* Call FW to make the change */
- XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_0, s0);
- XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_1, 0);
- XP_IOWRITE_BITS(pdata, XP_DRIVER_INT_REQ, REQUEST, 1);
-
- xgbe_phy_complete_ratechange(pdata);
+ xgbe_phy_perform_ratechange(pdata, 1, 3);
phy_data->cur_mode = XGBE_MODE_X;
@@ -1824,23 +1816,11 @@ static void xgbe_phy_x_mode(struct xgbe_prv_data *pdata)
static void xgbe_phy_sgmii_1000_mode(struct xgbe_prv_data *pdata)
{
struct xgbe_phy_data *phy_data = pdata->phy_data;
- unsigned int s0;
xgbe_phy_set_redrv_mode(pdata);
- xgbe_phy_start_ratechange(pdata);
-
/* 1G/SGMII */
- s0 = 0;
- XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, COMMAND, 1);
- XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, 2);
-
- /* Call FW to make the change */
- XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_0, s0);
- XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_1, 0);
- XP_IOWRITE_BITS(pdata, XP_DRIVER_INT_REQ, REQUEST, 1);
-
- xgbe_phy_complete_ratechange(pdata);
+ xgbe_phy_perform_ratechange(pdata, 1, 2);
phy_data->cur_mode = XGBE_MODE_SGMII_1000;
@@ -1850,23 +1830,11 @@ static void xgbe_phy_sgmii_1000_mode(struct xgbe_prv_data *pdata)
static void xgbe_phy_sgmii_100_mode(struct xgbe_prv_data *pdata)
{
struct xgbe_phy_data *phy_data = pdata->phy_data;
- unsigned int s0;
xgbe_phy_set_redrv_mode(pdata);
- xgbe_phy_start_ratechange(pdata);
-
- /* 1G/SGMII */
- s0 = 0;
- XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, COMMAND, 1);
- XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, 1);
-
- /* Call FW to make the change */
- XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_0, s0);
- XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_1, 0);
- XP_IOWRITE_BITS(pdata, XP_DRIVER_INT_REQ, REQUEST, 1);
-
- xgbe_phy_complete_ratechange(pdata);
+ /* 100M/SGMII */
+ xgbe_phy_perform_ratechange(pdata, 1, 1);
phy_data->cur_mode = XGBE_MODE_SGMII_100;
@@ -1876,23 +1844,11 @@ static void xgbe_phy_sgmii_100_mode(struct xgbe_prv_data *pdata)
static void xgbe_phy_kr_mode(struct xgbe_prv_data *pdata)
{
struct xgbe_phy_data *phy_data = pdata->phy_data;
- unsigned int s0;
xgbe_phy_set_redrv_mode(pdata);
- xgbe_phy_start_ratechange(pdata);
-
/* 10G/KR */
- s0 = 0;
- XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, COMMAND, 4);
- XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, 0);
-
- /* Call FW to make the change */
- XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_0, s0);
- XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_1, 0);
- XP_IOWRITE_BITS(pdata, XP_DRIVER_INT_REQ, REQUEST, 1);
-
- xgbe_phy_complete_ratechange(pdata);
+ xgbe_phy_perform_ratechange(pdata, 4, 0);
phy_data->cur_mode = XGBE_MODE_KR;
@@ -1902,23 +1858,11 @@ static void xgbe_phy_kr_mode(struct xgbe_prv_data *pdata)
static void xgbe_phy_kx_2500_mode(struct xgbe_prv_data *pdata)
{
struct xgbe_phy_data *phy_data = pdata->phy_data;
- unsigned int s0;
xgbe_phy_set_redrv_mode(pdata);
- xgbe_phy_start_ratechange(pdata);
-
/* 2.5G/KX */
- s0 = 0;
- XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, COMMAND, 2);
- XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, 0);
-
- /* Call FW to make the change */
- XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_0, s0);
- XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_1, 0);
- XP_IOWRITE_BITS(pdata, XP_DRIVER_INT_REQ, REQUEST, 1);
-
- xgbe_phy_complete_ratechange(pdata);
+ xgbe_phy_perform_ratechange(pdata, 2, 0);
phy_data->cur_mode = XGBE_MODE_KX_2500;
@@ -1928,23 +1872,11 @@ static void xgbe_phy_kx_2500_mode(struct xgbe_prv_data *pdata)
static void xgbe_phy_kx_1000_mode(struct xgbe_prv_data *pdata)
{
struct xgbe_phy_data *phy_data = pdata->phy_data;
- unsigned int s0;
xgbe_phy_set_redrv_mode(pdata);
- xgbe_phy_start_ratechange(pdata);
-
/* 1G/KX */
- s0 = 0;
- XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, COMMAND, 1);
- XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, 3);
-
- /* Call FW to make the change */
- XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_0, s0);
- XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_1, 0);
- XP_IOWRITE_BITS(pdata, XP_DRIVER_INT_REQ, REQUEST, 1);
-
- xgbe_phy_complete_ratechange(pdata);
+ xgbe_phy_perform_ratechange(pdata, 1, 3);
phy_data->cur_mode = XGBE_MODE_KX_1000;
@@ -2037,6 +1969,8 @@ static enum xgbe_mode xgbe_phy_get_baset_mode(struct xgbe_phy_data *phy_data,
return XGBE_MODE_SGMII_100;
case SPEED_1000:
return XGBE_MODE_SGMII_1000;
+ case SPEED_2500:
+ return XGBE_MODE_KX_2500;
case SPEED_10000:
return XGBE_MODE_KR;
default:
@@ -2180,6 +2114,9 @@ static bool xgbe_phy_use_baset_mode(struct xgbe_prv_data *pdata,
case XGBE_MODE_SGMII_1000:
return xgbe_phy_check_mode(pdata, mode,
ADVERTISED_1000baseT_Full);
+ case XGBE_MODE_KX_2500:
+ return xgbe_phy_check_mode(pdata, mode,
+ ADVERTISED_2500baseX_Full);
case XGBE_MODE_KR:
return xgbe_phy_check_mode(pdata, mode,
ADVERTISED_10000baseT_Full);
@@ -2210,6 +2147,8 @@ static bool xgbe_phy_use_sfp_mode(struct xgbe_prv_data *pdata,
return xgbe_phy_check_mode(pdata, mode,
ADVERTISED_1000baseT_Full);
case XGBE_MODE_SFI:
+ if (phy_data->sfp_mod_absent)
+ return true;
return xgbe_phy_check_mode(pdata, mode,
ADVERTISED_10000baseT_Full);
default:
@@ -2287,6 +2226,8 @@ static bool xgbe_phy_valid_speed_baset_mode(struct xgbe_phy_data *phy_data,
case SPEED_100:
case SPEED_1000:
return true;
+ case SPEED_2500:
+ return (phy_data->port_mode == XGBE_PORT_MODE_NBASE_T);
case SPEED_10000:
return (phy_data->port_mode == XGBE_PORT_MODE_10GBASE_T);
default:
@@ -3013,9 +2954,6 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata)
if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000) {
pdata->phy.supported |= SUPPORTED_10000baseT_Full;
phy_data->start_mode = XGBE_MODE_SFI;
- if (pdata->fec_ability & MDIO_PMA_10GBR_FECABLE_ABLE)
- pdata->phy.supported |=
- SUPPORTED_10000baseR_FEC;
}
phy_data->phydev_mode = XGBE_MDIO_MODE_CL22;
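
[ Note: all of the mode-change functions above shrink to a single call because
  xgbe_phy_perform_ratechange() now owns the whole firmware-mailbox round trip:
  log if the previous command is still pending, encode COMMAND/SUB_COMMAND into
  scratch register 0, ring the request bit, then poll for completion. A generic
  sketch of that structure under stated assumptions: struct foo_priv, the FOO_*
  masks/offsets, foo_mailbox_busy(), and the wait count are invented stand-ins
  for the driver's XP_* accessors and XGBE_RATECHANGE_COUNT:

	#include <linux/bitfield.h>
	#include <linux/bitops.h>
	#include <linux/delay.h>
	#include <linux/device.h>
	#include <linux/io.h>

	#define FOO_SCRATCH_0	0x00
	#define FOO_SCRATCH_1	0x04
	#define FOO_INT_REQ	0x08
	#define FOO_INT_STATUS	0x0c
	#define FOO_CMD_MASK	GENMASK(7, 0)
	#define FOO_SUBCMD_MASK	GENMASK(15, 8)

	struct foo_priv {
		struct device *dev;
		void __iomem *mbox;
	};

	static bool foo_mailbox_busy(struct foo_priv *priv)
	{
		/* hypothetical equivalent of XP_DRIVER_INT_RO STATUS */
		return readl(priv->mbox + FOO_INT_STATUS) & BIT(0);
	}

	static void foo_perform_ratechange(struct foo_priv *priv,
					   unsigned int cmd,
					   unsigned int sub_cmd)
	{
		unsigned int wait = 500;	/* stand-in for the driver's count */
		u32 s0 = 0;

		/* Log if a previous command did not complete */
		if (foo_mailbox_busy(priv))
			dev_dbg(priv->dev, "firmware mailbox not ready for command\n");

		/* Construct and issue the command */
		s0 |= FIELD_PREP(FOO_CMD_MASK, cmd);
		s0 |= FIELD_PREP(FOO_SUBCMD_MASK, sub_cmd);
		writel(s0, priv->mbox + FOO_SCRATCH_0);
		writel(0, priv->mbox + FOO_SCRATCH_1);
		writel(BIT(0), priv->mbox + FOO_INT_REQ);

		/* Wait for the command to complete */
		while (wait--) {
			if (!foo_mailbox_busy(priv))
				return;
			usleep_range(1000, 2000);
		}
		dev_dbg(priv->dev, "firmware mailbox command %u did not complete\n",
			cmd);
	} ]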
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-platform.c b/drivers/net/ethernet/amd/xgbe/xgbe-platform.c
index 84d4c51cab8c..d0f3dfb88202 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-platform.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-platform.c
@@ -448,13 +448,11 @@ static int xgbe_platform_probe(struct platform_device *pdev)
}
pdata->coherent = (attr == DEV_DMA_COHERENT);
if (pdata->coherent) {
- pdata->axdomain = XGBE_DMA_OS_AXDOMAIN;
- pdata->arcache = XGBE_DMA_OS_ARCACHE;
- pdata->awcache = XGBE_DMA_OS_AWCACHE;
+ pdata->arcr = XGBE_DMA_OS_ARCR;
+ pdata->awcr = XGBE_DMA_OS_AWCR;
} else {
- pdata->axdomain = XGBE_DMA_SYS_AXDOMAIN;
- pdata->arcache = XGBE_DMA_SYS_ARCACHE;
- pdata->awcache = XGBE_DMA_SYS_AWCACHE;
+ pdata->arcr = XGBE_DMA_SYS_ARCR;
+ pdata->awcr = XGBE_DMA_SYS_AWCR;
}
/* Set the maximum fifo amounts */
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
index a533a6cc2d53..d06d260cf1e2 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
@@ -267,7 +267,7 @@ void xgbe_ptp_register(struct xgbe_prv_data *pdata)
ktime_to_ns(ktime_get_real()));
/* Disable all timestamping to start */
- XGMAC_IOWRITE(pdata, MAC_TCR, 0);
+ XGMAC_IOWRITE(pdata, MAC_TSCR, 0);
pdata->tstamp_config.tx_type = HWTSTAMP_TX_OFF;
pdata->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index f9a24639f574..0938294f640a 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -128,6 +128,7 @@
#include <linux/net_tstamp.h>
#include <net/dcbnl.h>
#include <linux/completion.h>
+#include <linux/cpumask.h>
#define XGBE_DRV_NAME "amd-xgbe"
#define XGBE_DRV_VERSION "1.0.3"
@@ -163,14 +164,17 @@
#define XGBE_DMA_STOP_TIMEOUT 1
/* DMA cache settings - Outer sharable, write-back, write-allocate */
-#define XGBE_DMA_OS_AXDOMAIN 0x2
-#define XGBE_DMA_OS_ARCACHE 0xb
-#define XGBE_DMA_OS_AWCACHE 0xf
+#define XGBE_DMA_OS_ARCR 0x002b2b2b
+#define XGBE_DMA_OS_AWCR 0x2f2f2f2f
/* DMA cache settings - System, no caches used */
-#define XGBE_DMA_SYS_AXDOMAIN 0x3
-#define XGBE_DMA_SYS_ARCACHE 0x0
-#define XGBE_DMA_SYS_AWCACHE 0x0
+#define XGBE_DMA_SYS_ARCR 0x00303030
+#define XGBE_DMA_SYS_AWCR 0x30303030
+
+/* DMA cache settings - PCI device */
+#define XGBE_DMA_PCI_ARCR 0x00000003
+#define XGBE_DMA_PCI_AWCR 0x13131313
+#define XGBE_DMA_PCI_AWARCR 0x00000313
/* DMA channel interrupt modes */
#define XGBE_IRQ_MODE_EDGE 0
@@ -412,6 +416,7 @@ struct xgbe_ring {
/* Page allocation for RX buffers */
struct xgbe_page_alloc rx_hdr_pa;
struct xgbe_page_alloc rx_buf_pa;
+ int node;
/* Ring index values
* cur - Tx: index of descriptor to be used for current transfer
@@ -462,6 +467,9 @@ struct xgbe_channel {
struct xgbe_ring *tx_ring;
struct xgbe_ring *rx_ring;
+
+ int node;
+ cpumask_t affinity_mask;
} ____cacheline_aligned;
enum xgbe_state {
@@ -734,13 +742,6 @@ struct xgbe_hw_if {
/* For TX DMA Operate on Second Frame config */
int (*config_osp_mode)(struct xgbe_prv_data *);
- /* For RX and TX PBL config */
- int (*config_rx_pbl_val)(struct xgbe_prv_data *);
- int (*get_rx_pbl_val)(struct xgbe_prv_data *);
- int (*config_tx_pbl_val)(struct xgbe_prv_data *);
- int (*get_tx_pbl_val)(struct xgbe_prv_data *);
- int (*config_pblx8)(struct xgbe_prv_data *);
-
/* For MMC statistics */
void (*rx_mmc_int)(struct xgbe_prv_data *);
void (*tx_mmc_int)(struct xgbe_prv_data *);
@@ -837,7 +838,7 @@ struct xgbe_phy_if {
bool (*phy_valid_speed)(struct xgbe_prv_data *, int);
/* For single interrupt support */
- irqreturn_t (*an_isr)(int, struct xgbe_prv_data *);
+ irqreturn_t (*an_isr)(struct xgbe_prv_data *);
/* PHY implementation specific services */
struct xgbe_phy_impl_if phy_impl;
@@ -855,7 +856,7 @@ struct xgbe_i2c_if {
int (*i2c_xfer)(struct xgbe_prv_data *, struct xgbe_i2c_op *);
/* For single interrupt support */
- irqreturn_t (*i2c_isr)(int, struct xgbe_prv_data *);
+ irqreturn_t (*i2c_isr)(struct xgbe_prv_data *);
};
struct xgbe_desc_if {
@@ -924,6 +925,9 @@ struct xgbe_version_data {
unsigned int tx_tstamp_workaround;
unsigned int ecc_support;
unsigned int i2c_support;
+ unsigned int irq_reissue_support;
+ unsigned int tx_desc_prefetch;
+ unsigned int rx_desc_prefetch;
};
struct xgbe_prv_data {
@@ -1001,9 +1005,9 @@ struct xgbe_prv_data {
/* AXI DMA settings */
unsigned int coherent;
- unsigned int axdomain;
- unsigned int arcache;
- unsigned int awcache;
+ unsigned int arcr;
+ unsigned int awcr;
+ unsigned int awarcr;
/* Service routine support */
struct workqueue_struct *dev_workqueue;
@@ -1011,7 +1015,7 @@ struct xgbe_prv_data {
struct timer_list service_timer;
/* Rings for Tx/Rx on a DMA channel */
- struct xgbe_channel *channel;
+ struct xgbe_channel *channel[XGBE_MAX_DMA_CHANNELS];
unsigned int tx_max_channel_count;
unsigned int rx_max_channel_count;
unsigned int channel_count;
@@ -1026,19 +1030,21 @@ struct xgbe_prv_data {
unsigned int rx_q_count;
/* Tx/Rx common settings */
- unsigned int pblx8;
+ unsigned int blen;
+ unsigned int pbl;
+ unsigned int aal;
+ unsigned int rd_osr_limit;
+ unsigned int wr_osr_limit;
/* Tx settings */
unsigned int tx_sf_mode;
unsigned int tx_threshold;
- unsigned int tx_pbl;
unsigned int tx_osp_mode;
unsigned int tx_max_fifo_size;
/* Rx settings */
unsigned int rx_sf_mode;
unsigned int rx_threshold;
- unsigned int rx_pbl;
unsigned int rx_max_fifo_size;
/* Tx coalescing settings */
@@ -1159,6 +1165,12 @@ struct xgbe_prv_data {
unsigned int lpm_ctrl; /* CTRL1 for resume */
+ unsigned int isr_as_tasklet;
+ struct tasklet_struct tasklet_dev;
+ struct tasklet_struct tasklet_ecc;
+ struct tasklet_struct tasklet_i2c;
+ struct tasklet_struct tasklet_an;
+
#ifdef CONFIG_DEBUG_FS
struct dentry *xgbe_debugfs;
diff --git a/drivers/net/ethernet/apm/xgene-v2/ethtool.c b/drivers/net/ethernet/apm/xgene-v2/ethtool.c
index b6666e418e79..d31ad8270d93 100644
--- a/drivers/net/ethernet/apm/xgene-v2/ethtool.c
+++ b/drivers/net/ethernet/apm/xgene-v2/ethtool.c
@@ -157,7 +157,9 @@ static int xge_get_link_ksettings(struct net_device *ndev,
if (!phydev)
return -ENODEV;
- return phy_ethtool_ksettings_get(phydev, cmd);
+ phy_ethtool_ksettings_get(phydev, cmd);
+
+ return 0;
}
static int xge_set_link_ksettings(struct net_device *ndev,
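
[ Note: this hunk, like the xgene_enet_ethtool.c hunks below, tracks a core API
  change: phy_ethtool_ksettings_get() now returns void, so callers check for a
  PHY themselves and report success. The resulting caller shape, lifted directly
  from the hunks in this patch:

	#include <linux/ethtool.h>
	#include <linux/netdevice.h>
	#include <linux/phy.h>

	static int foo_get_link_ksettings(struct net_device *ndev,
					  struct ethtool_link_ksettings *cmd)
	{
		struct phy_device *phydev = ndev->phydev;

		if (!phydev)
			return -ENODEV;

		phy_ethtool_ksettings_get(phydev, cmd);	/* void as of this series */

		return 0;
	} ]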
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c b/drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c
index 28fdedc30b74..4f50f11718f4 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c
@@ -23,9 +23,17 @@
struct xgene_gstrings_stats {
char name[ETH_GSTRING_LEN];
int offset;
+ u32 addr;
+ u32 mask;
};
-#define XGENE_STAT(m) { #m, offsetof(struct xgene_enet_pdata, stats.m) }
+#define XGENE_STAT(m) { #m, offsetof(struct rtnl_link_stats64, m) }
+#define XGENE_EXTD_STAT(s, a, m) \
+ { \
+ .name = #s, \
+ .addr = a ## _ADDR, \
+ .mask = m \
+ }
static const struct xgene_gstrings_stats gstrings_stats[] = {
XGENE_STAT(rx_packets),
@@ -40,7 +48,65 @@ static const struct xgene_gstrings_stats gstrings_stats[] = {
XGENE_STAT(rx_fifo_errors)
};
+static const struct xgene_gstrings_stats gstrings_extd_stats[] = {
+ XGENE_EXTD_STAT(tx_rx_64b_frame_cntr, TR64, 31),
+ XGENE_EXTD_STAT(tx_rx_127b_frame_cntr, TR127, 31),
+ XGENE_EXTD_STAT(tx_rx_255b_frame_cntr, TR255, 31),
+ XGENE_EXTD_STAT(tx_rx_511b_frame_cntr, TR511, 31),
+ XGENE_EXTD_STAT(tx_rx_1023b_frame_cntr, TR1K, 31),
+ XGENE_EXTD_STAT(tx_rx_1518b_frame_cntr, TRMAX, 31),
+ XGENE_EXTD_STAT(tx_rx_1522b_frame_cntr, TRMGV, 31),
+ XGENE_EXTD_STAT(rx_fcs_error_cntr, RFCS, 16),
+ XGENE_EXTD_STAT(rx_multicast_pkt_cntr, RMCA, 31),
+ XGENE_EXTD_STAT(rx_broadcast_pkt_cntr, RBCA, 31),
+ XGENE_EXTD_STAT(rx_ctrl_frame_pkt_cntr, RXCF, 16),
+ XGENE_EXTD_STAT(rx_pause_frame_pkt_cntr, RXPF, 16),
+ XGENE_EXTD_STAT(rx_unk_opcode_cntr, RXUO, 16),
+ XGENE_EXTD_STAT(rx_align_err_cntr, RALN, 16),
+ XGENE_EXTD_STAT(rx_frame_len_err_cntr, RFLR, 16),
+ XGENE_EXTD_STAT(rx_frame_len_err_recov_cntr, DUMP, 0),
+ XGENE_EXTD_STAT(rx_code_err_cntr, RCDE, 16),
+ XGENE_EXTD_STAT(rx_carrier_sense_err_cntr, RCSE, 16),
+ XGENE_EXTD_STAT(rx_undersize_pkt_cntr, RUND, 16),
+ XGENE_EXTD_STAT(rx_oversize_pkt_cntr, ROVR, 16),
+ XGENE_EXTD_STAT(rx_fragments_cntr, RFRG, 16),
+ XGENE_EXTD_STAT(rx_jabber_cntr, RJBR, 16),
+ XGENE_EXTD_STAT(rx_jabber_recov_cntr, DUMP, 0),
+ XGENE_EXTD_STAT(rx_dropped_pkt_cntr, RDRP, 16),
+ XGENE_EXTD_STAT(rx_overrun_cntr, DUMP, 0),
+ XGENE_EXTD_STAT(tx_multicast_pkt_cntr, TMCA, 31),
+ XGENE_EXTD_STAT(tx_broadcast_pkt_cntr, TBCA, 31),
+ XGENE_EXTD_STAT(tx_pause_ctrl_frame_cntr, TXPF, 16),
+ XGENE_EXTD_STAT(tx_defer_pkt_cntr, TDFR, 31),
+ XGENE_EXTD_STAT(tx_excv_defer_pkt_cntr, TEDF, 31),
+ XGENE_EXTD_STAT(tx_single_col_pkt_cntr, TSCL, 31),
+ XGENE_EXTD_STAT(tx_multi_col_pkt_cntr, TMCL, 31),
+ XGENE_EXTD_STAT(tx_late_col_pkt_cntr, TLCL, 31),
+ XGENE_EXTD_STAT(tx_excv_col_pkt_cntr, TXCL, 31),
+ XGENE_EXTD_STAT(tx_total_col_cntr, TNCL, 31),
+ XGENE_EXTD_STAT(tx_pause_frames_hnrd_cntr, TPFH, 16),
+ XGENE_EXTD_STAT(tx_drop_frame_cntr, TDRP, 16),
+ XGENE_EXTD_STAT(tx_jabber_frame_cntr, TJBR, 12),
+ XGENE_EXTD_STAT(tx_fcs_error_cntr, TFCS, 12),
+ XGENE_EXTD_STAT(tx_ctrl_frame_cntr, TXCF, 12),
+ XGENE_EXTD_STAT(tx_oversize_frame_cntr, TOVR, 12),
+ XGENE_EXTD_STAT(tx_undersize_frame_cntr, TUND, 12),
+ XGENE_EXTD_STAT(tx_fragments_cntr, TFRG, 12),
+ XGENE_EXTD_STAT(tx_underrun_cntr, DUMP, 0)
+};
+
#define XGENE_STATS_LEN ARRAY_SIZE(gstrings_stats)
+#define XGENE_EXTD_STATS_LEN ARRAY_SIZE(gstrings_extd_stats)
+#define RFCS_IDX 7
+#define RALN_IDX 13
+#define RFLR_IDX 14
+#define FALSE_RFLR_IDX 15
+#define RUND_IDX 18
+#define FALSE_RJBR_IDX 22
+#define RX_OVERRUN_IDX 24
+#define TFCS_IDX 38
+#define TFRG_IDX 42
+#define TX_UNDERRUN_IDX 43
static void xgene_get_drvinfo(struct net_device *ndev,
struct ethtool_drvinfo *info)
@@ -61,17 +127,21 @@ static int xgene_get_link_ksettings(struct net_device *ndev,
struct phy_device *phydev = ndev->phydev;
u32 supported;
- if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) {
+ if (phy_interface_mode_is_rgmii(pdata->phy_mode)) {
if (phydev == NULL)
return -ENODEV;
- return phy_ethtool_ksettings_get(phydev, cmd);
+ phy_ethtool_ksettings_get(phydev, cmd);
+
+ return 0;
} else if (pdata->phy_mode == PHY_INTERFACE_MODE_SGMII) {
if (pdata->mdio_driver) {
if (!phydev)
return -ENODEV;
- return phy_ethtool_ksettings_get(phydev, cmd);
+ phy_ethtool_ksettings_get(phydev, cmd);
+
+ return 0;
}
supported = SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
@@ -111,7 +181,7 @@ static int xgene_set_link_ksettings(struct net_device *ndev,
struct xgene_enet_pdata *pdata = netdev_priv(ndev);
struct phy_device *phydev = ndev->phydev;
- if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) {
+ if (phy_interface_mode_is_rgmii(pdata->phy_mode)) {
if (!phydev)
return -ENODEV;
@@ -142,6 +212,11 @@ static void xgene_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
memcpy(p, gstrings_stats[i].name, ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
+
+ for (i = 0; i < XGENE_EXTD_STATS_LEN; i++) {
+ memcpy(p, gstrings_extd_stats[i].name, ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
}
static int xgene_get_sset_count(struct net_device *ndev, int sset)
@@ -149,18 +224,71 @@ static int xgene_get_sset_count(struct net_device *ndev, int sset)
if (sset != ETH_SS_STATS)
return -EINVAL;
- return XGENE_STATS_LEN;
+ return XGENE_STATS_LEN + XGENE_EXTD_STATS_LEN;
+}
+
+static void xgene_get_extd_stats(struct xgene_enet_pdata *pdata)
+{
+ u32 rx_drop, tx_drop;
+ u32 mask, tmp;
+ int i;
+
+ for (i = 0; i < XGENE_EXTD_STATS_LEN; i++) {
+ tmp = xgene_enet_rd_stat(pdata, gstrings_extd_stats[i].addr);
+ if (gstrings_extd_stats[i].mask) {
+ mask = GENMASK(gstrings_extd_stats[i].mask - 1, 0);
+ pdata->extd_stats[i] += (tmp & mask);
+ }
+ }
+
+ if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
+ /* Errata 10GE_10 - SW should interpret RALN as 0 */
+ pdata->extd_stats[RALN_IDX] = 0;
+ } else {
+ /* Errata ENET_15 - Fixes the RFCS, RFLR and TFCS counters */
+ pdata->extd_stats[RFCS_IDX] -= pdata->extd_stats[RALN_IDX];
+ pdata->extd_stats[RFLR_IDX] -= pdata->extd_stats[RUND_IDX];
+ pdata->extd_stats[TFCS_IDX] -= pdata->extd_stats[TFRG_IDX];
+ }
+
+ pdata->mac_ops->get_drop_cnt(pdata, &rx_drop, &tx_drop);
+ pdata->extd_stats[RX_OVERRUN_IDX] += rx_drop;
+ pdata->extd_stats[TX_UNDERRUN_IDX] += tx_drop;
+
+ /* Errata 10GE_8 - Frames recovered per Errata 10GE_8/ENET_11 */
+ pdata->extd_stats[FALSE_RFLR_IDX] = pdata->false_rflr;
+ /* Errata ENET_15 - Jabber frames recovered per Errata 10GE_10/ENET_15 */
+ pdata->extd_stats[FALSE_RJBR_IDX] = pdata->vlan_rjbr;
+}
+
+int xgene_extd_stats_init(struct xgene_enet_pdata *pdata)
+{
+ pdata->extd_stats = devm_kmalloc_array(&pdata->pdev->dev,
+ XGENE_EXTD_STATS_LEN, sizeof(u64), GFP_KERNEL);
+ if (!pdata->extd_stats)
+ return -ENOMEM;
+
+ xgene_get_extd_stats(pdata);
+ memset(pdata->extd_stats, 0, XGENE_EXTD_STATS_LEN * sizeof(u64));
+
+ return 0;
}
static void xgene_get_ethtool_stats(struct net_device *ndev,
struct ethtool_stats *dummy,
u64 *data)
{
- void *pdata = netdev_priv(ndev);
+ struct xgene_enet_pdata *pdata = netdev_priv(ndev);
+ struct rtnl_link_stats64 stats;
int i;
+ dev_get_stats(ndev, &stats);
for (i = 0; i < XGENE_STATS_LEN; i++)
- *data++ = *(u64 *)(pdata + gstrings_stats[i].offset);
+ data[i] = *(u64 *)((char *)&stats + gstrings_stats[i].offset);
+
+ xgene_get_extd_stats(pdata);
+ for (i = 0; i < XGENE_EXTD_STATS_LEN; i++)
+ data[i + XGENE_STATS_LEN] = pdata->extd_stats[i];
}
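
[ Note: the accumulation loop in xgene_get_extd_stats() uses the .mask field as the
  counter width in bits: GENMASK(mask - 1, 0) strips the unimplemented high bits of
  the 32-bit register before adding into a u64 software accumulator, so narrow
  hardware counters survive wraparound between reads. Entries with mask 0 (the DUMP
  rows) are instead filled from software counters such as false_rflr. A reduced
  sketch of the masking step:

	#include <linux/bitops.h>
	#include <linux/types.h>

	static void foo_accumulate_stat(u64 *acc, u32 raw, unsigned int width)
	{
		if (width)	/* width == 0: value comes from a software counter */
			*acc += raw & GENMASK(width - 1, 0);
	}

  For example, a 16-bit counter (width 16) read back as 0x0001ffff contributes only
  0xffff to the accumulator. ]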
static void xgene_get_pauseparam(struct net_device *ndev,
@@ -180,7 +308,7 @@ static int xgene_set_pauseparam(struct net_device *ndev,
struct phy_device *phydev = ndev->phydev;
u32 oldadv, newadv;
- if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII ||
+ if (phy_interface_mode_is_rgmii(pdata->phy_mode) ||
pdata->phy_mode == PHY_INTERFACE_MODE_SGMII) {
if (!phydev)
return -EINVAL;
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
index 2a835e07adfb..e45b587c2994 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
@@ -205,30 +205,24 @@ static u32 xgene_enet_ring_len(struct xgene_enet_desc_ring *ring)
}
void xgene_enet_parse_error(struct xgene_enet_desc_ring *ring,
- struct xgene_enet_pdata *pdata,
enum xgene_enet_err_code status)
{
switch (status) {
case INGRESS_CRC:
ring->rx_crc_errors++;
- ring->rx_dropped++;
break;
case INGRESS_CHECKSUM:
case INGRESS_CHECKSUM_COMPUTE:
ring->rx_errors++;
- ring->rx_dropped++;
break;
case INGRESS_TRUNC_FRAME:
ring->rx_frame_errors++;
- ring->rx_dropped++;
break;
case INGRESS_PKT_LEN:
ring->rx_length_errors++;
- ring->rx_dropped++;
break;
case INGRESS_PKT_UNDER:
ring->rx_frame_errors++;
- ring->rx_dropped++;
break;
case INGRESS_FIFO_OVERRUN:
ring->rx_fifo_errors++;
@@ -270,42 +264,39 @@ static void xgene_enet_wr_mcx_csr(struct xgene_enet_pdata *pdata,
iowrite32(val, addr);
}
-static bool xgene_enet_wr_indirect(void __iomem *addr, void __iomem *wr,
- void __iomem *cmd, void __iomem *cmd_done,
- u32 wr_addr, u32 wr_data)
+void xgene_enet_wr_mac(struct xgene_enet_pdata *pdata, u32 wr_addr, u32 wr_data)
{
- u32 done;
+ void __iomem *addr, *wr, *cmd, *cmd_done;
+ struct net_device *ndev = pdata->ndev;
u8 wait = 10;
+ u32 done;
+
+ if (pdata->mdio_driver && ndev->phydev &&
+ phy_interface_mode_is_rgmii(pdata->phy_mode)) {
+ struct mii_bus *bus = ndev->phydev->mdio.bus;
+
+ return xgene_mdio_wr_mac(bus->priv, wr_addr, wr_data);
+ }
+
+ addr = pdata->mcx_mac_addr + MAC_ADDR_REG_OFFSET;
+ wr = pdata->mcx_mac_addr + MAC_WRITE_REG_OFFSET;
+ cmd = pdata->mcx_mac_addr + MAC_COMMAND_REG_OFFSET;
+ cmd_done = pdata->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET;
+ spin_lock(&pdata->mac_lock);
iowrite32(wr_addr, addr);
iowrite32(wr_data, wr);
iowrite32(XGENE_ENET_WR_CMD, cmd);
- /* wait for write command to complete */
while (!(done = ioread32(cmd_done)) && wait--)
udelay(1);
if (!done)
- return false;
+ netdev_err(ndev, "mac write failed, addr: %04x data: %08x\n",
+ wr_addr, wr_data);
iowrite32(0, cmd);
-
- return true;
-}
-
-static void xgene_enet_wr_mcx_mac(struct xgene_enet_pdata *pdata,
- u32 wr_addr, u32 wr_data)
-{
- void __iomem *addr, *wr, *cmd, *cmd_done;
-
- addr = pdata->mcx_mac_addr + MAC_ADDR_REG_OFFSET;
- wr = pdata->mcx_mac_addr + MAC_WRITE_REG_OFFSET;
- cmd = pdata->mcx_mac_addr + MAC_COMMAND_REG_OFFSET;
- cmd_done = pdata->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET;
-
- if (!xgene_enet_wr_indirect(addr, wr, cmd, cmd_done, wr_addr, wr_data))
- netdev_err(pdata->ndev, "MCX mac write failed, addr: %04x\n",
- wr_addr);
+ spin_unlock(&pdata->mac_lock);
}
static void xgene_enet_rd_csr(struct xgene_enet_pdata *pdata,
@@ -332,42 +323,69 @@ static void xgene_enet_rd_mcx_csr(struct xgene_enet_pdata *pdata,
*val = ioread32(addr);
}
-static bool xgene_enet_rd_indirect(void __iomem *addr, void __iomem *rd,
- void __iomem *cmd, void __iomem *cmd_done,
- u32 rd_addr, u32 *rd_data)
+u32 xgene_enet_rd_mac(struct xgene_enet_pdata *pdata, u32 rd_addr)
{
- u32 done;
+ void __iomem *addr, *rd, *cmd, *cmd_done;
+ struct net_device *ndev = pdata->ndev;
+ u32 done, rd_data;
u8 wait = 10;
+ if (pdata->mdio_driver && ndev->phydev &&
+ phy_interface_mode_is_rgmii(pdata->phy_mode)) {
+ struct mii_bus *bus = ndev->phydev->mdio.bus;
+
+ return xgene_mdio_rd_mac(bus->priv, rd_addr);
+ }
+
+ addr = pdata->mcx_mac_addr + MAC_ADDR_REG_OFFSET;
+ rd = pdata->mcx_mac_addr + MAC_READ_REG_OFFSET;
+ cmd = pdata->mcx_mac_addr + MAC_COMMAND_REG_OFFSET;
+ cmd_done = pdata->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET;
+
+ spin_lock(&pdata->mac_lock);
iowrite32(rd_addr, addr);
iowrite32(XGENE_ENET_RD_CMD, cmd);
- /* wait for read command to complete */
while (!(done = ioread32(cmd_done)) && wait--)
udelay(1);
if (!done)
- return false;
+ netdev_err(ndev, "mac read failed, addr: %04x\n", rd_addr);
- *rd_data = ioread32(rd);
+ rd_data = ioread32(rd);
iowrite32(0, cmd);
+ spin_unlock(&pdata->mac_lock);
- return true;
+ return rd_data;
}
-static void xgene_enet_rd_mcx_mac(struct xgene_enet_pdata *pdata,
- u32 rd_addr, u32 *rd_data)
+u32 xgene_enet_rd_stat(struct xgene_enet_pdata *pdata, u32 rd_addr)
{
void __iomem *addr, *rd, *cmd, *cmd_done;
+ u32 done, rd_data;
+ u8 wait = 10;
- addr = pdata->mcx_mac_addr + MAC_ADDR_REG_OFFSET;
- rd = pdata->mcx_mac_addr + MAC_READ_REG_OFFSET;
- cmd = pdata->mcx_mac_addr + MAC_COMMAND_REG_OFFSET;
- cmd_done = pdata->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET;
+ addr = pdata->mcx_stats_addr + STAT_ADDR_REG_OFFSET;
+ rd = pdata->mcx_stats_addr + STAT_READ_REG_OFFSET;
+ cmd = pdata->mcx_stats_addr + STAT_COMMAND_REG_OFFSET;
+ cmd_done = pdata->mcx_stats_addr + STAT_COMMAND_DONE_REG_OFFSET;
+
+ spin_lock(&pdata->stats_lock);
+ iowrite32(rd_addr, addr);
+ iowrite32(XGENE_ENET_RD_CMD, cmd);
- if (!xgene_enet_rd_indirect(addr, rd, cmd, cmd_done, rd_addr, rd_data))
- netdev_err(pdata->ndev, "MCX mac read failed, addr: %04x\n",
+ while (!(done = ioread32(cmd_done)) && wait--)
+ udelay(1);
+
+ if (!done)
+ netdev_err(pdata->ndev, "mac stats read failed, addr: %04x\n",
rd_addr);
+
+ rd_data = ioread32(rd);
+ iowrite32(0, cmd);
+ spin_unlock(&pdata->stats_lock);
+
+ return rd_data;
}
static void xgene_gmac_set_mac_addr(struct xgene_enet_pdata *pdata)
@@ -379,8 +397,8 @@ static void xgene_gmac_set_mac_addr(struct xgene_enet_pdata *pdata)
(dev_addr[1] << 8) | dev_addr[0];
addr1 = (dev_addr[5] << 24) | (dev_addr[4] << 16);
- xgene_enet_wr_mcx_mac(pdata, STATION_ADDR0_ADDR, addr0);
- xgene_enet_wr_mcx_mac(pdata, STATION_ADDR1_ADDR, addr1);
+ xgene_enet_wr_mac(pdata, STATION_ADDR0_ADDR, addr0);
+ xgene_enet_wr_mac(pdata, STATION_ADDR1_ADDR, addr1);
}
static int xgene_enet_ecc_init(struct xgene_enet_pdata *pdata)
@@ -405,8 +423,8 @@ static int xgene_enet_ecc_init(struct xgene_enet_pdata *pdata)
static void xgene_gmac_reset(struct xgene_enet_pdata *pdata)
{
- xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, SOFT_RESET1);
- xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, 0);
+ xgene_enet_wr_mac(pdata, MAC_CONFIG_1_ADDR, SOFT_RESET1);
+ xgene_enet_wr_mac(pdata, MAC_CONFIG_1_ADDR, 0);
}
static void xgene_enet_configure_clock(struct xgene_enet_pdata *pdata)
@@ -456,8 +474,8 @@ static void xgene_gmac_set_speed(struct xgene_enet_pdata *pdata)
xgene_enet_rd_mcx_csr(pdata, ICM_CONFIG0_REG_0_ADDR, &icm0);
xgene_enet_rd_mcx_csr(pdata, ICM_CONFIG2_REG_0_ADDR, &icm2);
- xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_2_ADDR, &mc2);
- xgene_enet_rd_mcx_mac(pdata, INTERFACE_CONTROL_ADDR, &intf_ctl);
+ mc2 = xgene_enet_rd_mac(pdata, MAC_CONFIG_2_ADDR);
+ intf_ctl = xgene_enet_rd_mac(pdata, INTERFACE_CONTROL_ADDR);
xgene_enet_rd_csr(pdata, RGMII_REG_0_ADDR, &rgmii);
switch (pdata->phy_speed) {
@@ -495,8 +513,8 @@ static void xgene_gmac_set_speed(struct xgene_enet_pdata *pdata)
}
mc2 |= FULL_DUPLEX2 | PAD_CRC | LENGTH_CHK;
- xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_2_ADDR, mc2);
- xgene_enet_wr_mcx_mac(pdata, INTERFACE_CONTROL_ADDR, intf_ctl);
+ xgene_enet_wr_mac(pdata, MAC_CONFIG_2_ADDR, mc2);
+ xgene_enet_wr_mac(pdata, INTERFACE_CONTROL_ADDR, intf_ctl);
xgene_enet_wr_csr(pdata, RGMII_REG_0_ADDR, rgmii);
xgene_enet_configure_clock(pdata);
@@ -506,7 +524,7 @@ static void xgene_gmac_set_speed(struct xgene_enet_pdata *pdata)
static void xgene_enet_set_frame_size(struct xgene_enet_pdata *pdata, int size)
{
- xgene_enet_wr_mcx_mac(pdata, MAX_FRAME_LEN_ADDR, size);
+ xgene_enet_wr_mac(pdata, MAX_FRAME_LEN_ADDR, size);
}
static void xgene_gmac_enable_tx_pause(struct xgene_enet_pdata *pdata,
@@ -528,14 +546,14 @@ static void xgene_gmac_flowctl_tx(struct xgene_enet_pdata *pdata, bool enable)
{
u32 data;
- xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_1_ADDR, &data);
+ data = xgene_enet_rd_mac(pdata, MAC_CONFIG_1_ADDR);
if (enable)
data |= TX_FLOW_EN;
else
data &= ~TX_FLOW_EN;
- xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data);
+ xgene_enet_wr_mac(pdata, MAC_CONFIG_1_ADDR, data);
pdata->mac_ops->enable_tx_pause(pdata, enable);
}
@@ -544,14 +562,14 @@ static void xgene_gmac_flowctl_rx(struct xgene_enet_pdata *pdata, bool enable)
{
u32 data;
- xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_1_ADDR, &data);
+ data = xgene_enet_rd_mac(pdata, MAC_CONFIG_1_ADDR);
if (enable)
data |= RX_FLOW_EN;
else
data &= ~RX_FLOW_EN;
- xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data);
+ xgene_enet_wr_mac(pdata, MAC_CONFIG_1_ADDR, data);
}
static void xgene_gmac_init(struct xgene_enet_pdata *pdata)
@@ -565,9 +583,9 @@ static void xgene_gmac_init(struct xgene_enet_pdata *pdata)
xgene_gmac_set_mac_addr(pdata);
/* Adjust MDC clock frequency */
- xgene_enet_rd_mcx_mac(pdata, MII_MGMT_CONFIG_ADDR, &value);
+ value = xgene_enet_rd_mac(pdata, MII_MGMT_CONFIG_ADDR);
MGMT_CLOCK_SEL_SET(&value, 7);
- xgene_enet_wr_mcx_mac(pdata, MII_MGMT_CONFIG_ADDR, value);
+ xgene_enet_wr_mac(pdata, MII_MGMT_CONFIG_ADDR, value);
/* Enable drop if bufpool not available */
xgene_enet_rd_csr(pdata, RSIF_CONFIG_REG_ADDR, &value);
@@ -600,6 +618,18 @@ static void xgene_gmac_init(struct xgene_enet_pdata *pdata)
xgene_enet_wr_csr(pdata, CFG_BYPASS_ADDR, RESUME_TX);
}
+static void xgene_gmac_get_drop_cnt(struct xgene_enet_pdata *pdata,
+ u32 *rx, u32 *tx)
+{
+ u32 count;
+
+ xgene_enet_rd_mcx_csr(pdata, ICM_ECM_DROP_COUNT_REG0_ADDR, &count);
+ *rx = ICM_DROP_COUNT(count);
+ *tx = ECM_DROP_COUNT(count);
+ /* Errata: 10GE_4 - Fix ICM_ECM_DROP_COUNT not clear-on-read */
+ xgene_enet_rd_mcx_csr(pdata, ECM_CONFIG0_REG_0_ADDR, &count);
+}
+
static void xgene_enet_config_ring_if_assoc(struct xgene_enet_pdata *pdata)
{
u32 val = 0xffffffff;
@@ -637,32 +667,32 @@ static void xgene_gmac_rx_enable(struct xgene_enet_pdata *pdata)
{
u32 data;
- xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_1_ADDR, &data);
- xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data | RX_EN);
+ data = xgene_enet_rd_mac(pdata, MAC_CONFIG_1_ADDR);
+ xgene_enet_wr_mac(pdata, MAC_CONFIG_1_ADDR, data | RX_EN);
}
static void xgene_gmac_tx_enable(struct xgene_enet_pdata *pdata)
{
u32 data;
- xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_1_ADDR, &data);
- xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data | TX_EN);
+ data = xgene_enet_rd_mac(pdata, MAC_CONFIG_1_ADDR);
+ xgene_enet_wr_mac(pdata, MAC_CONFIG_1_ADDR, data | TX_EN);
}
static void xgene_gmac_rx_disable(struct xgene_enet_pdata *pdata)
{
u32 data;
- xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_1_ADDR, &data);
- xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data & ~RX_EN);
+ data = xgene_enet_rd_mac(pdata, MAC_CONFIG_1_ADDR);
+ xgene_enet_wr_mac(pdata, MAC_CONFIG_1_ADDR, data & ~RX_EN);
}
static void xgene_gmac_tx_disable(struct xgene_enet_pdata *pdata)
{
u32 data;
- xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_1_ADDR, &data);
- xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data & ~TX_EN);
+ data = xgene_enet_rd_mac(pdata, MAC_CONFIG_1_ADDR);
+ xgene_enet_wr_mac(pdata, MAC_CONFIG_1_ADDR, data & ~TX_EN);
}
bool xgene_ring_mgr_init(struct xgene_enet_pdata *p)
@@ -733,27 +763,6 @@ static void xgene_enet_clear(struct xgene_enet_pdata *pdata,
static void xgene_gport_shutdown(struct xgene_enet_pdata *pdata)
{
struct device *dev = &pdata->pdev->dev;
- struct xgene_enet_desc_ring *ring;
- u32 pb;
- int i;
-
- pb = 0;
- for (i = 0; i < pdata->rxq_cnt; i++) {
- ring = pdata->rx_ring[i]->buf_pool;
- pb |= BIT(xgene_enet_get_fpsel(ring->id));
- ring = pdata->rx_ring[i]->page_pool;
- if (ring)
- pb |= BIT(xgene_enet_get_fpsel(ring->id));
-
- }
- xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIFPRESET_ADDR, pb);
-
- pb = 0;
- for (i = 0; i < pdata->txq_cnt; i++) {
- ring = pdata->tx_ring[i];
- pb |= BIT(xgene_enet_ring_bufnum(ring->id));
- }
- xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIWQRESET_ADDR, pb);
if (dev->of_node) {
if (!IS_ERR(pdata->clk))
@@ -1009,6 +1018,7 @@ const struct xgene_mac_ops xgene_gmac_ops = {
.tx_enable = xgene_gmac_tx_enable,
.rx_disable = xgene_gmac_rx_disable,
.tx_disable = xgene_gmac_tx_disable,
+ .get_drop_cnt = xgene_gmac_get_drop_cnt,
.set_speed = xgene_gmac_set_speed,
.set_mac_addr = xgene_gmac_set_mac_addr,
.set_framesize = xgene_enet_set_frame_size,
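
[ Note: the read, write, and new statistics paths in this file now share one
  serialized indirect-access sequence, each under its own spinlock (mac_lock for
  the MAC window, stats_lock for the statistics window, both initialized in the
  xgene_enet_probe() hunks below). Condensed into a single sketch; everything
  here is assembled from the hunks above, with the write case shown:

	static void foo_wr_indirect(struct xgene_enet_pdata *pdata, u32 wr_addr,
				    u32 wr_data)
	{
		u32 done;
		u8 wait = 10;

		spin_lock(&pdata->mac_lock);
		iowrite32(wr_addr, pdata->mcx_mac_addr + MAC_ADDR_REG_OFFSET);
		iowrite32(wr_data, pdata->mcx_mac_addr + MAC_WRITE_REG_OFFSET);
		iowrite32(XGENE_ENET_WR_CMD,
			  pdata->mcx_mac_addr + MAC_COMMAND_REG_OFFSET);

		/* poll the command-done flag for up to ~10us */
		while (!(done = ioread32(pdata->mcx_mac_addr +
					 MAC_COMMAND_DONE_REG_OFFSET)) && wait--)
			udelay(1);
		if (!done)
			netdev_err(pdata->ndev, "mac write failed, addr: %04x\n",
				   wr_addr);

		iowrite32(0, pdata->mcx_mac_addr + MAC_COMMAND_REG_OFFSET);
		spin_unlock(&pdata->mac_lock);
	} ]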
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
index d250bfe94d24..5d3e18d3c94c 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
@@ -115,6 +115,7 @@ enum xgene_enet_rm {
#define BLOCK_ETH_CLKRST_CSR_OFFSET 0xc000
#define BLOCK_ETH_DIAG_CSR_OFFSET 0xD000
#define BLOCK_ETH_MAC_OFFSET 0x0000
+#define BLOCK_ETH_STATS_OFFSET 0x0000
#define BLOCK_ETH_MAC_CSR_OFFSET 0x2800
#define CLKEN_ADDR 0xc208
@@ -126,6 +127,12 @@ enum xgene_enet_rm {
#define MAC_READ_REG_OFFSET 0x0c
#define MAC_COMMAND_DONE_REG_OFFSET 0x10
+#define STAT_ADDR_REG_OFFSET 0x14
+#define STAT_COMMAND_REG_OFFSET 0x18
+#define STAT_WRITE_REG_OFFSET 0x1c
+#define STAT_READ_REG_OFFSET 0x20
+#define STAT_COMMAND_DONE_REG_OFFSET 0x24
+
#define PCS_ADDR_REG_OFFSET 0x00
#define PCS_COMMAND_REG_OFFSET 0x04
#define PCS_WRITE_REG_OFFSET 0x08
@@ -185,6 +192,10 @@ enum xgene_enet_rm {
#define CFG_CLE_NXTFPSEL0(val) (((val) << 20) & GENMASK(23, 20))
#define ICM_CONFIG0_REG_0_ADDR 0x0400
#define ICM_CONFIG2_REG_0_ADDR 0x0410
+#define ECM_CONFIG0_REG_0_ADDR 0x0500
+#define ECM_CONFIG0_REG_1_ADDR 0x0504
+#define ICM_ECM_DROP_COUNT_REG0_ADDR 0x0508
+#define ICM_ECM_DROP_COUNT_REG1_ADDR 0x050c
#define RX_DV_GATE_REG_0_ADDR 0x05fc
#define TX_DV_GATE_EN0 BIT(2)
#define RX_DV_GATE_EN0 BIT(1)
@@ -217,12 +228,53 @@ enum xgene_enet_rm {
#define FULL_DUPLEX2 BIT(0)
#define PAD_CRC BIT(2)
#define LENGTH_CHK BIT(4)
-#define SCAN_AUTO_INCR BIT(5)
-#define TBYT_ADDR 0x38
-#define TPKT_ADDR 0x39
-#define TDRP_ADDR 0x45
-#define TFCS_ADDR 0x47
-#define TUND_ADDR 0x4a
+
+#define TR64_ADDR 0x20
+#define TR127_ADDR 0x21
+#define TR255_ADDR 0x22
+#define TR511_ADDR 0x23
+#define TR1K_ADDR 0x24
+#define TRMAX_ADDR 0x25
+#define TRMGV_ADDR 0x26
+
+#define RFCS_ADDR 0x29
+#define RMCA_ADDR 0x2a
+#define RBCA_ADDR 0x2b
+#define RXCF_ADDR 0x2c
+#define RXPF_ADDR 0x2d
+#define RXUO_ADDR 0x2e
+#define RALN_ADDR 0x2f
+#define RFLR_ADDR 0x30
+#define RCDE_ADDR 0x31
+#define RCSE_ADDR 0x32
+#define RUND_ADDR 0x33
+#define ROVR_ADDR 0x34
+#define RFRG_ADDR 0x35
+#define RJBR_ADDR 0x36
+#define RDRP_ADDR 0x37
+
+#define TMCA_ADDR 0x3a
+#define TBCA_ADDR 0x3b
+#define TXPF_ADDR 0x3c
+#define TDFR_ADDR 0x3d
+#define TEDF_ADDR 0x3e
+#define TSCL_ADDR 0x3f
+#define TMCL_ADDR 0x40
+#define TLCL_ADDR 0x41
+#define TXCL_ADDR 0x42
+#define TNCL_ADDR 0x43
+#define TPFH_ADDR 0x44
+#define TDRP_ADDR 0x45
+#define TJBR_ADDR 0x46
+#define TFCS_ADDR 0x47
+#define TXCF_ADDR 0x48
+#define TOVR_ADDR 0x49
+#define TUND_ADDR 0x4a
+#define TFRG_ADDR 0x4b
+#define DUMP_ADDR 0x27
+
+#define ECM_DROP_COUNT(src) xgene_get_bits(src, 0, 15)
+#define ICM_DROP_COUNT(src) xgene_get_bits(src, 16, 31)
#define TSO_IPPROTO_TCP 1
@@ -380,14 +432,16 @@ static inline u16 xgene_enet_get_numslots(u16 id, u32 size)
}
void xgene_enet_parse_error(struct xgene_enet_desc_ring *ring,
- struct xgene_enet_pdata *pdata,
enum xgene_enet_err_code status);
-
int xgene_enet_mdio_config(struct xgene_enet_pdata *pdata);
void xgene_enet_mdio_remove(struct xgene_enet_pdata *pdata);
bool xgene_ring_mgr_init(struct xgene_enet_pdata *p);
int xgene_enet_phy_connect(struct net_device *ndev);
void xgene_enet_phy_disconnect(struct xgene_enet_pdata *pdata);
+u32 xgene_enet_rd_mac(struct xgene_enet_pdata *pdata, u32 rd_addr);
+void xgene_enet_wr_mac(struct xgene_enet_pdata *pdata, u32 wr_addr,
+ u32 wr_data);
+u32 xgene_enet_rd_stat(struct xgene_enet_pdata *pdata, u32 rd_addr);
extern const struct xgene_mac_ops xgene_gmac_ops;
extern const struct xgene_port_ops xgene_gport_ops;
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 5f37ed3506d5..d3906f6b01bd 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -246,9 +246,9 @@ static int xgene_enet_tx_completion(struct xgene_enet_desc_ring *cp_ring,
skb_frag_t *frag;
dma_addr_t *frag_dma_addr;
u16 skb_index;
- u8 status;
- int i, ret = 0;
u8 mss_index;
+ u8 status;
+ int i;
skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
skb = cp_ring->cp_skb[skb_index];
@@ -275,19 +275,17 @@ static int xgene_enet_tx_completion(struct xgene_enet_desc_ring *cp_ring,
/* Checking for error */
status = GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
if (unlikely(status > 2)) {
- xgene_enet_parse_error(cp_ring, netdev_priv(cp_ring->ndev),
- status);
- ret = -EIO;
+ cp_ring->tx_dropped++;
+ cp_ring->tx_errors++;
}
if (likely(skb)) {
dev_kfree_skb_any(skb);
} else {
netdev_err(cp_ring->ndev, "completion skb is NULL\n");
- ret = -EIO;
}
- return ret;
+ return 0;
}
static int xgene_enet_setup_mss(struct net_device *ndev, u32 mss)
@@ -658,6 +656,18 @@ static void xgene_enet_free_pagepool(struct xgene_enet_desc_ring *buf_pool,
buf_pool->head = head;
}
+/* Errata 10GE_10 and ENET_15 - Fix duplicated HW statistic counters */
+static bool xgene_enet_errata_10GE_10(struct sk_buff *skb, u32 len, u8 status)
+{
+ if (status == INGRESS_CRC &&
+ len >= (ETHER_STD_PACKET + 1) &&
+ len <= (ETHER_STD_PACKET + 4) &&
+ skb->protocol == htons(ETH_P_8021Q))
+ return true;
+
+ return false;
+}
+
/* Errata 10GE_8 and ENET_11 - allow packet with length <=64B */
static bool xgene_enet_errata_10GE_8(struct sk_buff *skb, u32 len, u8 status)
{
@@ -708,10 +718,15 @@ static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring,
status = (GET_VAL(ELERR, le64_to_cpu(raw_desc->m0)) << LERR_LEN) |
GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
if (unlikely(status)) {
- if (!xgene_enet_errata_10GE_8(skb, datalen, status)) {
+ if (xgene_enet_errata_10GE_8(skb, datalen, status)) {
+ pdata->false_rflr++;
+ } else if (xgene_enet_errata_10GE_10(skb, datalen, status)) {
+ pdata->vlan_rjbr++;
+ } else {
dev_kfree_skb_any(skb);
xgene_enet_free_pagepool(page_pool, raw_desc, exp_desc);
- xgene_enet_parse_error(rx_ring, pdata, status);
+ xgene_enet_parse_error(rx_ring, status);
+ rx_ring->rx_dropped++;
goto out;
}
}
@@ -1466,10 +1481,9 @@ err:
static void xgene_enet_get_stats64(
struct net_device *ndev,
- struct rtnl_link_stats64 *storage)
+ struct rtnl_link_stats64 *stats)
{
struct xgene_enet_pdata *pdata = netdev_priv(ndev);
- struct rtnl_link_stats64 *stats = &pdata->stats;
struct xgene_enet_desc_ring *ring;
int i;
@@ -1478,6 +1492,8 @@ static void xgene_enet_get_stats64(
if (ring) {
stats->tx_packets += ring->tx_packets;
stats->tx_bytes += ring->tx_bytes;
+ stats->tx_dropped += ring->tx_dropped;
+ stats->tx_errors += ring->tx_errors;
}
}
@@ -1486,14 +1502,18 @@ static void xgene_enet_get_stats64(
if (ring) {
stats->rx_packets += ring->rx_packets;
stats->rx_bytes += ring->rx_bytes;
- stats->rx_errors += ring->rx_length_errors +
+ stats->rx_dropped += ring->rx_dropped;
+ stats->rx_errors += ring->rx_errors +
+ ring->rx_length_errors +
ring->rx_crc_errors +
ring->rx_frame_errors +
ring->rx_fifo_errors;
- stats->rx_dropped += ring->rx_dropped;
+ stats->rx_length_errors += ring->rx_length_errors;
+ stats->rx_crc_errors += ring->rx_crc_errors;
+ stats->rx_frame_errors += ring->rx_frame_errors;
+ stats->rx_fifo_errors += ring->rx_fifo_errors;
}
}
- memcpy(storage, stats, sizeof(struct rtnl_link_stats64));
}
static int xgene_enet_set_mac_address(struct net_device *ndev, void *addr)
@@ -1614,7 +1634,7 @@ static int xgene_enet_get_irqs(struct xgene_enet_pdata *pdata)
struct device *dev = &pdev->dev;
int i, ret, max_irqs;
- if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
+ if (phy_interface_mode_is_rgmii(pdata->phy_mode))
max_irqs = 1;
else if (pdata->phy_mode == PHY_INTERFACE_MODE_SGMII)
max_irqs = 2;
@@ -1740,7 +1760,7 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
dev_err(dev, "Unable to get phy-connection-type\n");
return pdata->phy_mode;
}
- if (pdata->phy_mode != PHY_INTERFACE_MODE_RGMII &&
+ if (!phy_interface_mode_is_rgmii(pdata->phy_mode) &&
pdata->phy_mode != PHY_INTERFACE_MODE_SGMII &&
pdata->phy_mode != PHY_INTERFACE_MODE_XGMII) {
dev_err(dev, "Incorrect phy-connection-type specified\n");
@@ -1785,15 +1805,18 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
pdata->cle.base = base_addr + BLOCK_ETH_CLE_CSR_OFFSET;
pdata->eth_ring_if_addr = base_addr + BLOCK_ETH_RING_IF_OFFSET;
pdata->eth_diag_csr_addr = base_addr + BLOCK_ETH_DIAG_CSR_OFFSET;
- if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII ||
+ if (phy_interface_mode_is_rgmii(pdata->phy_mode) ||
pdata->phy_mode == PHY_INTERFACE_MODE_SGMII) {
pdata->mcx_mac_addr = pdata->base_addr + BLOCK_ETH_MAC_OFFSET;
+ pdata->mcx_stats_addr =
+ pdata->base_addr + BLOCK_ETH_STATS_OFFSET;
offset = (pdata->enet_id == XGENE_ENET1) ?
BLOCK_ETH_MAC_CSR_OFFSET :
X2_BLOCK_ETH_MAC_CSR_OFFSET;
pdata->mcx_mac_csr_addr = base_addr + offset;
} else {
pdata->mcx_mac_addr = base_addr + BLOCK_AXG_MAC_OFFSET;
+ pdata->mcx_stats_addr = base_addr + BLOCK_AXG_STATS_OFFSET;
pdata->mcx_mac_csr_addr = base_addr + BLOCK_AXG_MAC_CSR_OFFSET;
pdata->pcs_addr = base_addr + BLOCK_PCS_OFFSET;
}
@@ -1881,6 +1904,9 @@ static void xgene_enet_setup_ops(struct xgene_enet_pdata *pdata)
{
switch (pdata->phy_mode) {
case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
pdata->mac_ops = &xgene_gmac_ops;
pdata->port_ops = &xgene_gport_ops;
pdata->rm = RM3;
@@ -2055,6 +2081,7 @@ static int xgene_enet_probe(struct platform_device *pdev)
goto err;
xgene_enet_setup_ops(pdata);
+ spin_lock_init(&pdata->mac_lock);
if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
ndev->features |= NETIF_F_TSO | NETIF_F_RXCSUM;
@@ -2076,7 +2103,7 @@ static int xgene_enet_probe(struct platform_device *pdev)
if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
INIT_DELAYED_WORK(&pdata->link_work, link_state);
} else if (!pdata->mdio_driver) {
- if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
+ if (phy_interface_mode_is_rgmii(pdata->phy_mode))
ret = xgene_enet_mdio_config(pdata);
else
INIT_DELAYED_WORK(&pdata->link_work, link_state);
@@ -2085,6 +2112,11 @@ static int xgene_enet_probe(struct platform_device *pdev)
goto err1;
}
+ spin_lock_init(&pdata->stats_lock);
+ ret = xgene_extd_stats_init(pdata);
+ if (ret)
+ goto err2;
+
xgene_enet_napi_add(pdata);
ret = register_netdev(ndev);
if (ret) {
@@ -2102,7 +2134,7 @@ err2:
if (pdata->mdio_driver)
xgene_enet_phy_disconnect(pdata);
- else if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
+ else if (phy_interface_mode_is_rgmii(pdata->phy_mode))
xgene_enet_mdio_remove(pdata);
err1:
xgene_enet_delete_desc_rings(pdata);
@@ -2126,12 +2158,12 @@ static int xgene_enet_remove(struct platform_device *pdev)
if (pdata->mdio_driver)
xgene_enet_phy_disconnect(pdata);
- else if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
+ else if (phy_interface_mode_is_rgmii(pdata->phy_mode))
xgene_enet_mdio_remove(pdata);
unregister_netdev(ndev);
- pdata->port_ops->shutdown(pdata);
xgene_enet_delete_desc_rings(pdata);
+ pdata->port_ops->shutdown(pdata);
free_netdev(ndev);
return 0;
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
index 0d4be2425ebc..985768596900 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
@@ -42,6 +42,7 @@
#define XGENE_DRV_VERSION "v1.0"
#define ETHER_MIN_PACKET 64
+#define ETHER_STD_PACKET 1518
#define XGENE_ENET_STD_MTU 1536
#define XGENE_ENET_MAX_MTU 9600
#define SKB_BUFFER_SIZE (XGENE_ENET_STD_MTU - NET_IP_ALIGN)
@@ -138,6 +139,8 @@ struct xgene_enet_desc_ring {
__le64 *exp_bufs;
u64 tx_packets;
u64 tx_bytes;
+ u64 tx_dropped;
+ u64 tx_errors;
u64 rx_packets;
u64 rx_bytes;
u64 rx_dropped;
@@ -155,6 +158,7 @@ struct xgene_mac_ops {
void (*rx_enable)(struct xgene_enet_pdata *pdata);
void (*tx_disable)(struct xgene_enet_pdata *pdata);
void (*rx_disable)(struct xgene_enet_pdata *pdata);
+ void (*get_drop_cnt)(struct xgene_enet_pdata *pdata, u32 *rx, u32 *tx);
void (*set_speed)(struct xgene_enet_pdata *pdata);
void (*set_mac_addr)(struct xgene_enet_pdata *pdata);
void (*set_framesize)(struct xgene_enet_pdata *pdata, int framesize);
@@ -212,6 +216,7 @@ struct xgene_enet_pdata {
void __iomem *eth_diag_csr_addr;
void __iomem *mcx_mac_addr;
void __iomem *mcx_mac_csr_addr;
+ void __iomem *mcx_stats_addr;
void __iomem *base_addr;
void __iomem *pcs_addr;
void __iomem *ring_csr_addr;
@@ -219,8 +224,12 @@ struct xgene_enet_pdata {
int phy_mode;
enum xgene_enet_rm rm;
struct xgene_enet_cle cle;
- struct rtnl_link_stats64 stats;
+ u64 *extd_stats;
+ u64 false_rflr;
+ u64 vlan_rjbr;
+ spinlock_t stats_lock; /* statistics lock */
const struct xgene_mac_ops *mac_ops;
+ spinlock_t mac_lock; /* mac lock */
const struct xgene_port_ops *port_ops;
struct xgene_ring_ops *ring_ops;
const struct xgene_cle_ops *cle_ops;
@@ -263,5 +272,6 @@ static inline u16 xgene_enet_dst_ring_num(struct xgene_enet_desc_ring *ring)
}
void xgene_enet_set_ethtool_ops(struct net_device *netdev);
+int xgene_extd_stats_init(struct xgene_enet_pdata *pdata);
#endif /* __XGENE_ENET_MAIN_H__ */
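
xgene_extd_stats_init() itself lands in xgene_enet_ethtool.c in this series; a plausible sketch, assuming it merely allocates the u64 accumulator array sized to the ethtool extended-statistics table (XGENE_EXTD_STATS_LEN is a stand-in name here):

        int xgene_extd_stats_init(struct xgene_enet_pdata *pdata)
        {
                pdata->extd_stats = devm_kcalloc(&pdata->pdev->dev,
                                                 XGENE_EXTD_STATS_LEN,
                                                 sizeof(u64), GFP_KERNEL);
                if (!pdata->extd_stats)
                        return -ENOMEM;

                return 0;
        }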
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
index a8e063bdee3b..b1a83fdbefb8 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
@@ -54,41 +54,6 @@ static void xgene_enet_wr_mcx_csr(struct xgene_enet_pdata *pdata,
iowrite32(val, addr);
}
-static bool xgene_enet_wr_indirect(struct xgene_indirect_ctl *ctl,
- u32 wr_addr, u32 wr_data)
-{
- int i;
-
- iowrite32(wr_addr, ctl->addr);
- iowrite32(wr_data, ctl->ctl);
- iowrite32(XGENE_ENET_WR_CMD, ctl->cmd);
-
- /* wait for write command to complete */
- for (i = 0; i < 10; i++) {
- if (ioread32(ctl->cmd_done)) {
- iowrite32(0, ctl->cmd);
- return true;
- }
- udelay(1);
- }
-
- return false;
-}
-
-static void xgene_enet_wr_mac(struct xgene_enet_pdata *p,
- u32 wr_addr, u32 wr_data)
-{
- struct xgene_indirect_ctl ctl = {
- .addr = p->mcx_mac_addr + MAC_ADDR_REG_OFFSET,
- .ctl = p->mcx_mac_addr + MAC_WRITE_REG_OFFSET,
- .cmd = p->mcx_mac_addr + MAC_COMMAND_REG_OFFSET,
- .cmd_done = p->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET
- };
-
- if (!xgene_enet_wr_indirect(&ctl, wr_addr, wr_data))
- netdev_err(p->ndev, "mac write failed, addr: %04x\n", wr_addr);
-}
-
static u32 xgene_enet_rd_csr(struct xgene_enet_pdata *p, u32 offset)
{
return ioread32(p->eth_csr_addr + offset);
@@ -104,42 +69,6 @@ static u32 xgene_enet_rd_mcx_csr(struct xgene_enet_pdata *p, u32 offset)
return ioread32(p->mcx_mac_csr_addr + offset);
}
-static u32 xgene_enet_rd_indirect(struct xgene_indirect_ctl *ctl, u32 rd_addr)
-{
- u32 rd_data;
- int i;
-
- iowrite32(rd_addr, ctl->addr);
- iowrite32(XGENE_ENET_RD_CMD, ctl->cmd);
-
- /* wait for read command to complete */
- for (i = 0; i < 10; i++) {
- if (ioread32(ctl->cmd_done)) {
- rd_data = ioread32(ctl->ctl);
- iowrite32(0, ctl->cmd);
-
- return rd_data;
- }
- udelay(1);
- }
-
- pr_err("%s: mac read failed, addr: %04x\n", __func__, rd_addr);
-
- return 0;
-}
-
-static u32 xgene_enet_rd_mac(struct xgene_enet_pdata *p, u32 rd_addr)
-{
- struct xgene_indirect_ctl ctl = {
- .addr = p->mcx_mac_addr + MAC_ADDR_REG_OFFSET,
- .ctl = p->mcx_mac_addr + MAC_READ_REG_OFFSET,
- .cmd = p->mcx_mac_addr + MAC_COMMAND_REG_OFFSET,
- .cmd_done = p->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET
- };
-
- return xgene_enet_rd_indirect(&ctl, rd_addr);
-}
-
static int xgene_enet_ecc_init(struct xgene_enet_pdata *p)
{
struct net_device *ndev = p->ndev;
@@ -166,6 +95,24 @@ static int xgene_enet_ecc_init(struct xgene_enet_pdata *p)
return -ENODEV;
}
+static void xgene_sgmac_get_drop_cnt(struct xgene_enet_pdata *pdata,
+ u32 *rx, u32 *tx)
+{
+ u32 addr, count;
+
+ addr = (pdata->enet_id != XGENE_ENET1) ?
+ XG_MCX_ICM_ECM_DROP_COUNT_REG0_ADDR :
+ ICM_ECM_DROP_COUNT_REG0_ADDR + pdata->port_id * OFFSET_4;
+ count = xgene_enet_rd_mcx_csr(pdata, addr);
+ *rx = ICM_DROP_COUNT(count);
+ *tx = ECM_DROP_COUNT(count);
+ /* Errata: 10GE_4 - ICM_ECM_DROP_COUNT not clear-on-read */
+ addr = (pdata->enet_id != XGENE_ENET1) ?
+ XG_MCX_ECM_CONFIG0_REG_0_ADDR :
+ ECM_CONFIG0_REG_0_ADDR + pdata->port_id * OFFSET_4;
+ xgene_enet_rd_mcx_csr(pdata, addr);
+}
+
static void xgene_enet_config_ring_if_assoc(struct xgene_enet_pdata *p)
{
u32 val;
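
Both the SGMII and XGMII flavours of get_drop_cnt() read one 32-bit CSR that packs the inbound (ICM, reported as rx) and outbound (ECM, reported as tx) drop counts, then read the ECM config register to clear the counter, working around errata 10GE_4 where the count register is not clear-on-read. A sketch of the extraction macros, assuming a low-half/high-half split (the authoritative layout is in xgene_enet_hw.h):

        #define ICM_DROP_COUNT(src)     ((src) & 0xffff)
        #define ECM_DROP_COUNT(src)     (((src) >> 16) & 0xffff)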
@@ -587,26 +534,6 @@ static void xgene_enet_clear(struct xgene_enet_pdata *pdata,
static void xgene_enet_shutdown(struct xgene_enet_pdata *p)
{
struct device *dev = &p->pdev->dev;
- struct xgene_enet_desc_ring *ring;
- u32 pb;
- int i;
-
- pb = 0;
- for (i = 0; i < p->rxq_cnt; i++) {
- ring = p->rx_ring[i]->buf_pool;
- pb |= BIT(xgene_enet_get_fpsel(ring->id));
- ring = p->rx_ring[i]->page_pool;
- if (ring)
- pb |= BIT(xgene_enet_get_fpsel(ring->id));
- }
- xgene_enet_wr_ring_if(p, ENET_CFGSSQMIFPRESET_ADDR, pb);
-
- pb = 0;
- for (i = 0; i < p->txq_cnt; i++) {
- ring = p->tx_ring[i];
- pb |= BIT(xgene_enet_ring_bufnum(ring->id));
- }
- xgene_enet_wr_ring_if(p, ENET_CFGSSQMIWQRESET_ADDR, pb);
if (dev->of_node) {
if (!IS_ERR(p->clk))
@@ -671,6 +598,7 @@ const struct xgene_mac_ops xgene_sgmac_ops = {
.tx_enable = xgene_sgmac_tx_enable,
.rx_disable = xgene_sgmac_rx_disable,
.tx_disable = xgene_sgmac_tx_disable,
+ .get_drop_cnt = xgene_sgmac_get_drop_cnt,
.set_speed = xgene_sgmac_set_speed,
.set_mac_addr = xgene_sgmac_set_mac_addr,
.set_framesize = xgene_sgmac_set_frame_size,
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
index 423240c97d39..b7d75d067c7a 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
@@ -71,21 +71,6 @@ static bool xgene_enet_wr_indirect(void __iomem *addr, void __iomem *wr,
return true;
}
-static void xgene_enet_wr_mac(struct xgene_enet_pdata *pdata,
- u32 wr_addr, u32 wr_data)
-{
- void __iomem *addr, *wr, *cmd, *cmd_done;
-
- addr = pdata->mcx_mac_addr + MAC_ADDR_REG_OFFSET;
- wr = pdata->mcx_mac_addr + MAC_WRITE_REG_OFFSET;
- cmd = pdata->mcx_mac_addr + MAC_COMMAND_REG_OFFSET;
- cmd_done = pdata->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET;
-
- if (!xgene_enet_wr_indirect(addr, wr, cmd, cmd_done, wr_addr, wr_data))
- netdev_err(pdata->ndev, "MCX mac write failed, addr: %04x\n",
- wr_addr);
-}
-
static void xgene_enet_wr_pcs(struct xgene_enet_pdata *pdata,
u32 wr_addr, u32 wr_data)
{
@@ -148,21 +133,6 @@ static bool xgene_enet_rd_indirect(void __iomem *addr, void __iomem *rd,
return true;
}
-static void xgene_enet_rd_mac(struct xgene_enet_pdata *pdata,
- u32 rd_addr, u32 *rd_data)
-{
- void __iomem *addr, *rd, *cmd, *cmd_done;
-
- addr = pdata->mcx_mac_addr + MAC_ADDR_REG_OFFSET;
- rd = pdata->mcx_mac_addr + MAC_READ_REG_OFFSET;
- cmd = pdata->mcx_mac_addr + MAC_COMMAND_REG_OFFSET;
- cmd_done = pdata->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET;
-
- if (!xgene_enet_rd_indirect(addr, rd, cmd, cmd_done, rd_addr, rd_data))
- netdev_err(pdata->ndev, "MCX mac read failed, addr: %04x\n",
- rd_addr);
-}
-
static bool xgene_enet_rd_pcs(struct xgene_enet_pdata *pdata,
u32 rd_addr, u32 *rd_data)
{
@@ -210,6 +180,18 @@ static int xgene_enet_ecc_init(struct xgene_enet_pdata *pdata)
return 0;
}
+static void xgene_xgmac_get_drop_cnt(struct xgene_enet_pdata *pdata,
+ u32 *rx, u32 *tx)
+{
+ u32 count;
+
+ xgene_enet_rd_axg_csr(pdata, XGENET_ICM_ECM_DROP_COUNT_REG0, &count);
+ *rx = ICM_DROP_COUNT(count);
+ *tx = ECM_DROP_COUNT(count);
+ /* Errata: 10GE_4 - ICM_ECM_DROP_COUNT not clear-on-read */
+ xgene_enet_rd_axg_csr(pdata, XGENET_ECM_CONFIG0_REG_0, &count);
+}
+
static void xgene_enet_config_ring_if_assoc(struct xgene_enet_pdata *pdata)
{
xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIWQASSOC_ADDR, 0);
@@ -300,7 +282,7 @@ static void xgene_xgmac_flowctl_tx(struct xgene_enet_pdata *pdata, bool enable)
{
u32 data;
- xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1, &data);
+ data = xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1);
if (enable)
data |= HSTTCTLEN;
@@ -316,7 +298,7 @@ static void xgene_xgmac_flowctl_rx(struct xgene_enet_pdata *pdata, bool enable)
{
u32 data;
- xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1, &data);
+ data = xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1);
if (enable)
data |= HSTRCTLEN;
@@ -332,7 +314,7 @@ static void xgene_xgmac_init(struct xgene_enet_pdata *pdata)
xgene_xgmac_reset(pdata);
- xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1, &data);
+ data = xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1);
data |= HSTPPEN;
data &= ~HSTLENCHK;
xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data);
@@ -379,7 +361,7 @@ static void xgene_xgmac_rx_enable(struct xgene_enet_pdata *pdata)
{
u32 data;
- xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1, &data);
+ data = xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1);
xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data | HSTRFEN);
}
@@ -387,7 +369,7 @@ static void xgene_xgmac_tx_enable(struct xgene_enet_pdata *pdata)
{
u32 data;
- xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1, &data);
+ data = xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1);
xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data | HSTTFEN);
}
@@ -395,7 +377,7 @@ static void xgene_xgmac_rx_disable(struct xgene_enet_pdata *pdata)
{
u32 data;
- xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1, &data);
+ data = xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1);
xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data & ~HSTRFEN);
}
@@ -403,7 +385,7 @@ static void xgene_xgmac_tx_disable(struct xgene_enet_pdata *pdata)
{
u32 data;
- xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1, &data);
+ data = xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1);
xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data & ~HSTTFEN);
}
@@ -464,26 +446,6 @@ static void xgene_enet_xgcle_bypass(struct xgene_enet_pdata *pdata,
static void xgene_enet_shutdown(struct xgene_enet_pdata *pdata)
{
struct device *dev = &pdata->pdev->dev;
- struct xgene_enet_desc_ring *ring;
- u32 pb;
- int i;
-
- pb = 0;
- for (i = 0; i < pdata->rxq_cnt; i++) {
- ring = pdata->rx_ring[i]->buf_pool;
- pb |= BIT(xgene_enet_get_fpsel(ring->id));
- ring = pdata->rx_ring[i]->page_pool;
- if (ring)
- pb |= BIT(xgene_enet_get_fpsel(ring->id));
- }
- xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIFPRESET_ADDR, pb);
-
- pb = 0;
- for (i = 0; i < pdata->txq_cnt; i++) {
- ring = pdata->tx_ring[i];
- pb |= BIT(xgene_enet_ring_bufnum(ring->id));
- }
- xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIWQRESET_ADDR, pb);
if (dev->of_node) {
if (!IS_ERR(pdata->clk))
@@ -567,6 +529,7 @@ const struct xgene_mac_ops xgene_xgmac_ops = {
.set_mac_addr = xgene_xgmac_set_mac_addr,
.set_framesize = xgene_xgmac_set_frame_size,
.set_mss = xgene_xgmac_set_mss,
+ .get_drop_cnt = xgene_xgmac_get_drop_cnt,
.link_state = xgene_enet_link_state,
.enable_tx_pause = xgene_xgmac_enable_tx_pause,
.flowctl_rx = xgene_xgmac_flowctl_rx,
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h
index e644a429ebf4..a3b45517df45 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h
@@ -23,6 +23,7 @@
#define X2_BLOCK_ETH_MAC_CSR_OFFSET 0x3000
#define BLOCK_AXG_MAC_OFFSET 0x0800
+#define BLOCK_AXG_STATS_OFFSET 0x0800
#define BLOCK_AXG_MAC_CSR_OFFSET 0x2000
#define BLOCK_PCS_OFFSET 0x3800
@@ -70,6 +71,8 @@
#define XG_RSIF_CONFIG1_REG_ADDR 0x00b8
#define XG_RSIF_PLC_CLE_BUFF_THRESH 0x1
#define RSIF_PLC_CLE_BUFF_THRESH_SET(dst, val) xgene_set_bits(dst, val, 0, 2)
+#define XG_MCX_ECM_CONFIG0_REG_0_ADDR 0x0070
+#define XG_MCX_ICM_ECM_DROP_COUNT_REG0_ADDR 0x0124
#define XCLE_BYPASS_REG0_ADDR 0x0160
#define XCLE_BYPASS_REG1_ADDR 0x0164
#define XG_CFG_BYPASS_ADDR 0x0204
@@ -80,6 +83,8 @@
#define XG_ENET_SPARE_CFG_REG_ADDR 0x040c
#define XG_ENET_SPARE_CFG_REG_1_ADDR 0x0410
#define XGENET_RX_DV_GATE_REG_0_ADDR 0x0804
+#define XGENET_ECM_CONFIG0_REG_0 0x0870
+#define XGENET_ICM_ECM_DROP_COUNT_REG0 0x0924
#define XGENET_CSR_ECM_CFG_0_ADDR 0x0880
#define XGENET_CSR_MULTI_DPF0_ADDR 0x0888
#define XGENET_CSR_MULTI_DPF1_ADDR 0x088c
diff --git a/drivers/net/ethernet/apple/bmac.c b/drivers/net/ethernet/apple/bmac.c
index 2b2d87089987..eac740c476ce 100644
--- a/drivers/net/ethernet/apple/bmac.c
+++ b/drivers/net/ethernet/apple/bmac.c
@@ -1218,8 +1218,7 @@ static void bmac_reset_and_enable(struct net_device *dev)
*/
skb = netdev_alloc_skb(dev, ETHERMINPACKET);
if (skb != NULL) {
- data = skb_put(skb, ETHERMINPACKET);
- memset(data, 0, ETHERMINPACKET);
+ data = skb_put_zero(skb, ETHERMINPACKET);
memcpy(data, dev->dev_addr, ETH_ALEN);
memcpy(data + ETH_ALEN, dev->dev_addr, ETH_ALEN);
bmac_transmit_packet(skb, dev);
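
skb_put_zero() and skb_put_data(), used in this and several conversions below, fold the memset()/memcpy() into the tail-extension call. Their effect, sketched from the canonical definitions in <linux/skbuff.h>:

        static inline void *skb_put_zero(struct sk_buff *skb, unsigned int len)
        {
                void *tmp = skb_put(skb, len);

                memset(tmp, 0, len);
                return tmp;
        }

        static inline void *skb_put_data(struct sk_buff *skb,
                                         const void *data, unsigned int len)
        {
                void *tmp = skb_put(skb, len);

                memcpy(tmp, data, len);
                return tmp;
        }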
diff --git a/drivers/net/ethernet/apple/macmace.c b/drivers/net/ethernet/apple/macmace.c
index 857df9c45f04..f17a160dbff2 100644
--- a/drivers/net/ethernet/apple/macmace.c
+++ b/drivers/net/ethernet/apple/macmace.c
@@ -663,7 +663,7 @@ static void mace_dma_rx_frame(struct net_device *dev, struct mace_frame *mf)
return;
}
skb_reserve(skb, 2);
- memcpy(skb_put(skb, frame_length), mf->data, frame_length);
+ skb_put_data(skb, mf->data, frame_length);
skb->protocol = eth_type_trans(skb, dev);
netif_rx(skb);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
index 3a8a4aa13687..9a0817938eca 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
@@ -223,7 +223,7 @@ int aq_ring_rx_clean(struct aq_ring_s *self, int *work_done, int budget)
skb->protocol = eth_type_trans(skb, ndev);
if (unlikely(buff->is_cso_err)) {
++self->stats.rx.errors;
- __skb_mark_checksum_bad(skb);
+ skb->ip_summed = CHECKSUM_NONE;
} else {
if (buff->is_ip_cso) {
__skb_incr_checksum_unnecessary(skb);
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 7e913d8331c3..8c9986f3fc01 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -2252,7 +2252,7 @@ static netdev_tx_t atl1c_xmit_frame(struct sk_buff *skb,
if (atl1c_tx_map(adapter, skb, tpd, type) < 0) {
netif_info(adapter, tx_done, adapter->netdev,
- "tx-skb droppted due to dma error\n");
+ "tx-skb dropped due to dma error\n");
/* roll back tpd/buffer */
atl1c_tx_rollback(adapter, tpd, type);
dev_kfree_skb_any(skb);
diff --git a/drivers/net/ethernet/aurora/nb8800.c b/drivers/net/ethernet/aurora/nb8800.c
index 5711fbbd6ae3..041cfb7952f8 100644
--- a/drivers/net/ethernet/aurora/nb8800.c
+++ b/drivers/net/ethernet/aurora/nb8800.c
@@ -251,7 +251,7 @@ static void nb8800_receive(struct net_device *dev, unsigned int i,
if (len <= RX_COPYBREAK) {
dma_sync_single_for_cpu(&dev->dev, dma, len, DMA_FROM_DEVICE);
- memcpy(skb_put(skb, len), data, len);
+ skb_put_data(skb, data, len);
dma_sync_single_for_device(&dev->dev, dma, len,
DMA_FROM_DEVICE);
} else {
@@ -264,7 +264,7 @@ static void nb8800_receive(struct net_device *dev, unsigned int i,
}
dma_unmap_page(&dev->dev, dma, RX_BUF_SIZE, DMA_FROM_DEVICE);
- memcpy(skb_put(skb, RX_COPYHDR), data, RX_COPYHDR);
+ skb_put_data(skb, data, RX_COPYHDR);
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
offset + RX_COPYHDR, len - RX_COPYHDR,
RX_BUF_SIZE);
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 5b95bb48ce97..f411936b744c 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -1836,7 +1836,9 @@ static int b44_get_link_ksettings(struct net_device *dev,
if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
BUG_ON(!dev->phydev);
- return phy_ethtool_ksettings_get(dev->phydev, cmd);
+ phy_ethtool_ksettings_get(dev->phydev, cmd);
+
+ return 0;
}
supported = (SUPPORTED_Autoneg);
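
phy_ethtool_ksettings_get() was changed to return void, since it cannot fail once a phydev exists; each converted caller therefore validates the PHY itself and returns 0 explicitly. The same pattern recurs in bcm63xx_enet, bcmgenet and tg3 below:

        if (!dev->phydev)
                return -ENODEV;

        phy_ethtool_ksettings_get(dev->phydev, cmd);

        return 0;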
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index 50d88d3e03b6..61a88b64bd39 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -609,8 +609,7 @@ static int bcm_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
dev_kfree_skb(skb);
skb = nskb;
}
- data = skb_put(skb, needed);
- memset(data, 0, needed);
+ data = skb_put_zero(skb, needed);
}
/* point to the next available desc */
@@ -1453,7 +1452,10 @@ static int bcm_enet_get_link_ksettings(struct net_device *dev,
if (priv->has_phy) {
if (!dev->phydev)
return -ENODEV;
- return phy_ethtool_ksettings_get(dev->phydev, cmd);
+
+ phy_ethtool_ksettings_get(dev->phydev, cmd);
+
+ return 0;
} else {
cmd->base.autoneg = 0;
cmd->base.speed = (priv->force_speed_100) ?
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 5274501428e4..5333601f855f 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -1099,7 +1099,7 @@ static struct sk_buff *bcm_sysport_insert_tsb(struct sk_buff *skb,
skb = nskb;
}
- tsb = (struct bcm_tsb *)skb_push(skb, sizeof(*tsb));
+ tsb = skb_push(skb, sizeof(*tsb));
/* Zero-out TSB by default */
memset(tsb, 0, sizeof(*tsb));
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index f619c4cac51f..67fe3d826566 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -4284,8 +4284,8 @@ int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
return 0;
}
-int __bnx2x_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
- struct tc_to_netdev *tc)
+int __bnx2x_setup_tc(struct net_device *dev, u32 handle, u32 chain_index,
+ __be16 proto, struct tc_to_netdev *tc)
{
if (tc->type != TC_SETUP_MQPRIO)
return -EINVAL;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index 243cb9748d35..c26688d2f326 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -486,8 +486,8 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev);
/* setup_tc callback */
int bnx2x_setup_tc(struct net_device *dev, u8 num_tc);
-int __bnx2x_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
- struct tc_to_netdev *tc);
+int __bnx2x_setup_tc(struct net_device *dev, u32 handle, u32 chain_index,
+ __be16 proto, struct tc_to_netdev *tc);
int bnx2x_get_vf_config(struct net_device *dev, int vf,
struct ifla_vf_info *ivi);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 349a46593abf..c12b4d3e946e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -10303,7 +10303,7 @@ sp_rtnl_not_reset:
}
if (test_and_clear_bit(BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN,
&bp->sp_rtnl_state)){
- if (!test_bit(__LINK_STATE_NOCARRIER, &bp->dev->state)) {
+ if (netif_carrier_ok(bp->dev)) {
bnx2x_tx_disable(bp);
BNX2X_ERR("PF indicated channel is not servicable anymore. This means this VF device is no longer operational\n");
}
@@ -15351,6 +15351,7 @@ int bnx2x_configure_ptp_filters(struct bnx2x *bp)
break;
case HWTSTAMP_FILTER_ALL:
case HWTSTAMP_FILTER_SOME:
+ case HWTSTAMP_FILTER_NTP_ALL:
bp->rx_filter = HWTSTAMP_FILTER_NONE;
break;
case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 74e8e215524d..a19f68f5862d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -175,6 +175,8 @@ static const struct pci_device_id bnxt_pci_tbl[] = {
{ PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
{ PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
#ifdef CONFIG_BNXT_SRIOV
+ { PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
+ { PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
{ PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
{ PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
{ PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
@@ -461,14 +463,17 @@ normal_tx:
prod = NEXT_TX(prod);
txr->tx_prod = prod;
- writel(DB_KEY_TX | prod, txr->tx_doorbell);
- writel(DB_KEY_TX | prod, txr->tx_doorbell);
+ if (!skb->xmit_more || netif_xmit_stopped(txq))
+ bnxt_db_write(bp, txr->tx_doorbell, DB_KEY_TX | prod);
tx_done:
mmiowb();
if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
+ if (skb->xmit_more && !tx_buf->is_push)
+ bnxt_db_write(bp, txr->tx_doorbell, DB_KEY_TX | prod);
+
netif_tx_stop_queue(txq);
/* netif_tx_stop_queue() must be done before checking
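
The TX path now defers the doorbell while skb->xmit_more is set, publishing the producer index only for the last packet of a batch, and flushes any pending doorbell before stopping the queue so no posted descriptors are stranded. Schematically (a sketch of the control flow, not the literal driver code):

        if (!skb->xmit_more || netif_xmit_stopped(txq))
                bnxt_db_write(bp, txr->tx_doorbell, DB_KEY_TX | prod);

        if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
                /* flush a deferred doorbell before the queue stops */
                if (skb->xmit_more && !tx_buf->is_push)
                        bnxt_db_write(bp, txr->tx_doorbell, DB_KEY_TX | prod);

                netif_tx_stop_queue(txq);
        }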
@@ -582,7 +587,8 @@ static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
if (!page)
return NULL;
- *mapping = dma_map_page(dev, page, 0, PAGE_SIZE, bp->rx_dir);
+ *mapping = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, bp->rx_dir,
+ DMA_ATTR_WEAK_ORDERING);
if (dma_mapping_error(dev, *mapping)) {
__free_page(page);
return NULL;
@@ -601,8 +607,9 @@ static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
if (!data)
return NULL;
- *mapping = dma_map_single(&pdev->dev, data + bp->rx_dma_offset,
- bp->rx_buf_use_size, bp->rx_dir);
+ *mapping = dma_map_single_attrs(&pdev->dev, data + bp->rx_dma_offset,
+ bp->rx_buf_use_size, bp->rx_dir,
+ DMA_ATTR_WEAK_ORDERING);
if (dma_mapping_error(&pdev->dev, *mapping)) {
kfree(data);
@@ -705,8 +712,9 @@ static inline int bnxt_alloc_rx_page(struct bnxt *bp,
return -ENOMEM;
}
- mapping = dma_map_page(&pdev->dev, page, offset, BNXT_RX_PAGE_SIZE,
- PCI_DMA_FROMDEVICE);
+ mapping = dma_map_page_attrs(&pdev->dev, page, offset,
+ BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE,
+ DMA_ATTR_WEAK_ORDERING);
if (dma_mapping_error(&pdev->dev, mapping)) {
__free_page(page);
return -EIO;
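
The RX buffer mappings move to the *_attrs API with DMA_ATTR_WEAK_ORDERING so platforms that support it may use relaxed-ordering PCIe transactions; map and unmap must pass the same attributes, which is why every unmap site later in this patch changes in lockstep:

        mapping = dma_map_page_attrs(&pdev->dev, page, offset,
                                     BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE,
                                     DMA_ATTR_WEAK_ORDERING);
        if (dma_mapping_error(&pdev->dev, mapping))
                return -EIO;

        /* ... hardware fills the buffer ... */

        dma_unmap_page_attrs(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
                             PCI_DMA_FROMDEVICE, DMA_ATTR_WEAK_ORDERING);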
@@ -799,7 +807,8 @@ static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
return NULL;
}
dma_addr -= bp->rx_dma_offset;
- dma_unmap_page(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir);
+ dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
+ DMA_ATTR_WEAK_ORDERING);
if (unlikely(!payload))
payload = eth_get_headlen(data_ptr, len);
@@ -841,8 +850,8 @@ static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
}
skb = build_skb(data, 0);
- dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
- bp->rx_dir);
+ dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
+ bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
if (!skb) {
kfree(data);
return NULL;
@@ -909,8 +918,9 @@ static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, struct bnxt_napi *bnapi,
return NULL;
}
- dma_unmap_page(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_page_attrs(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
+ PCI_DMA_FROMDEVICE,
+ DMA_ATTR_WEAK_ORDERING);
skb->data_len += frag_len;
skb->len += frag_len;
@@ -1330,8 +1340,9 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
tpa_info->mapping = new_mapping;
skb = build_skb(data, 0);
- dma_unmap_single(&bp->pdev->dev, mapping, bp->rx_buf_use_size,
- bp->rx_dir);
+ dma_unmap_single_attrs(&bp->pdev->dev, mapping,
+ bp->rx_buf_use_size, bp->rx_dir,
+ DMA_ATTR_WEAK_ORDERING);
if (!skb) {
kfree(data);
@@ -1815,8 +1826,7 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
/* Sync BD data before updating doorbell */
wmb();
- writel(DB_KEY_TX | prod, db);
- writel(DB_KEY_TX | prod, db);
+ bnxt_db_write(bp, db, DB_KEY_TX | prod);
}
cpr->cp_raw_cons = raw_cons;
@@ -1832,14 +1842,10 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
if (event & BNXT_RX_EVENT) {
struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
- writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
- writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
- if (event & BNXT_AGG_EVENT) {
- writel(DB_KEY_RX | rxr->rx_agg_prod,
- rxr->rx_agg_doorbell);
- writel(DB_KEY_RX | rxr->rx_agg_prod,
- rxr->rx_agg_doorbell);
- }
+ bnxt_db_write(bp, rxr->rx_doorbell, DB_KEY_RX | rxr->rx_prod);
+ if (event & BNXT_AGG_EVENT)
+ bnxt_db_write(bp, rxr->rx_agg_doorbell,
+ DB_KEY_RX | rxr->rx_agg_prod);
}
return rx_pkts;
}
@@ -1899,13 +1905,11 @@ static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
cpr->cp_raw_cons = raw_cons;
BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
- writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
- writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
+ bnxt_db_write(bp, rxr->rx_doorbell, DB_KEY_RX | rxr->rx_prod);
- if (event & BNXT_AGG_EVENT) {
- writel(DB_KEY_RX | rxr->rx_agg_prod, rxr->rx_agg_doorbell);
- writel(DB_KEY_RX | rxr->rx_agg_prod, rxr->rx_agg_doorbell);
- }
+ if (event & BNXT_AGG_EVENT)
+ bnxt_db_write(bp, rxr->rx_agg_doorbell,
+ DB_KEY_RX | rxr->rx_agg_prod);
if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
napi_complete_done(napi, rx_pkts);
@@ -2015,9 +2019,11 @@ static void bnxt_free_rx_skbs(struct bnxt *bp)
if (!data)
continue;
- dma_unmap_single(&pdev->dev, tpa_info->mapping,
- bp->rx_buf_use_size,
- bp->rx_dir);
+ dma_unmap_single_attrs(&pdev->dev,
+ tpa_info->mapping,
+ bp->rx_buf_use_size,
+ bp->rx_dir,
+ DMA_ATTR_WEAK_ORDERING);
tpa_info->data = NULL;
@@ -2037,13 +2043,15 @@ static void bnxt_free_rx_skbs(struct bnxt *bp)
if (BNXT_RX_PAGE_MODE(bp)) {
mapping -= bp->rx_dma_offset;
- dma_unmap_page(&pdev->dev, mapping,
- PAGE_SIZE, bp->rx_dir);
+ dma_unmap_page_attrs(&pdev->dev, mapping,
+ PAGE_SIZE, bp->rx_dir,
+ DMA_ATTR_WEAK_ORDERING);
__free_page(data);
} else {
- dma_unmap_single(&pdev->dev, mapping,
- bp->rx_buf_use_size,
- bp->rx_dir);
+ dma_unmap_single_attrs(&pdev->dev, mapping,
+ bp->rx_buf_use_size,
+ bp->rx_dir,
+ DMA_ATTR_WEAK_ORDERING);
kfree(data);
}
}
@@ -2056,8 +2064,10 @@ static void bnxt_free_rx_skbs(struct bnxt *bp)
if (!page)
continue;
- dma_unmap_page(&pdev->dev, rx_agg_buf->mapping,
- BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE);
+ dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
+ BNXT_RX_PAGE_SIZE,
+ PCI_DMA_FROMDEVICE,
+ DMA_ATTR_WEAK_ORDERING);
rx_agg_buf->page = NULL;
__clear_bit(j, rxr->rx_agg_bmap);
@@ -2900,6 +2910,32 @@ static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
return 0;
}
+static void bnxt_free_hwrm_short_cmd_req(struct bnxt *bp)
+{
+ if (bp->hwrm_short_cmd_req_addr) {
+ struct pci_dev *pdev = bp->pdev;
+
+ dma_free_coherent(&pdev->dev, BNXT_HWRM_MAX_REQ_LEN,
+ bp->hwrm_short_cmd_req_addr,
+ bp->hwrm_short_cmd_req_dma_addr);
+ bp->hwrm_short_cmd_req_addr = NULL;
+ }
+}
+
+static int bnxt_alloc_hwrm_short_cmd_req(struct bnxt *bp)
+{
+ struct pci_dev *pdev = bp->pdev;
+
+ bp->hwrm_short_cmd_req_addr =
+ dma_alloc_coherent(&pdev->dev, BNXT_HWRM_MAX_REQ_LEN,
+ &bp->hwrm_short_cmd_req_dma_addr,
+ GFP_KERNEL);
+ if (!bp->hwrm_short_cmd_req_addr)
+ return -ENOMEM;
+
+ return 0;
+}
+
static void bnxt_free_stats(struct bnxt *bp)
{
u32 size, i;
@@ -3247,16 +3283,41 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
__le32 *resp_len, *valid;
u16 cp_ring_id, len = 0;
struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;
+ u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN;
req->seq_id = cpu_to_le16(bp->hwrm_cmd_seq++);
memset(resp, 0, PAGE_SIZE);
cp_ring_id = le16_to_cpu(req->cmpl_ring);
intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1;
+ if (bp->flags & BNXT_FLAG_SHORT_CMD) {
+ void *short_cmd_req = bp->hwrm_short_cmd_req_addr;
+ struct hwrm_short_input short_input = {0};
+
+ memcpy(short_cmd_req, req, msg_len);
+ memset(short_cmd_req + msg_len, 0, BNXT_HWRM_MAX_REQ_LEN -
+ msg_len);
+
+ short_input.req_type = req->req_type;
+ short_input.signature =
+ cpu_to_le16(SHORT_REQ_SIGNATURE_SHORT_CMD);
+ short_input.size = cpu_to_le16(msg_len);
+ short_input.req_addr =
+ cpu_to_le64(bp->hwrm_short_cmd_req_dma_addr);
+
+ data = (u32 *)&short_input;
+ msg_len = sizeof(short_input);
+
+ /* Sync memory write before updating doorbell */
+ wmb();
+
+ max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
+ }
+
/* Write request msg to hwrm channel */
__iowrite32_copy(bp->bar0, data, msg_len / 4);
- for (i = msg_len; i < BNXT_HWRM_MAX_REQ_LEN; i += 4)
+ for (i = msg_len; i < max_req_len; i += 4)
writel(0, bp->bar0 + i);
/* currently supports only one outstanding message */
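
When the firmware advertises (and requires) short commands, the full request is copied into a pre-allocated DMA buffer and only a 16-byte descriptor is written through BAR0; the firmware then fetches the real request by DMA. The descriptor is struct hwrm_short_input from bnxt_hsi.h, whose layout is roughly:

        struct hwrm_short_input {
                __le16  req_type;       /* opcode of the real request */
                __le16  signature;      /* SHORT_REQ_SIGNATURE_SHORT_CMD */
                __le16  unused_0;
                __le16  size;           /* length of the DMA'd request */
                __le64  req_addr;       /* bus address of the request buffer */
        };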
@@ -4694,6 +4755,7 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp)
int rc;
struct hwrm_ver_get_input req = {0};
struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
+ u32 dev_caps_cfg;
bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1);
@@ -4731,6 +4793,11 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp)
!resp->chip_metal)
bp->flags |= BNXT_FLAG_CHIP_NITRO_A0;
+ dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg);
+ if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
+ (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
+ bp->flags |= BNXT_FLAG_SHORT_CMD;
+
hwrm_ver_get_exit:
mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
@@ -7079,8 +7146,8 @@ int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
return 0;
}
-static int bnxt_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
- struct tc_to_netdev *ntc)
+static int bnxt_setup_tc(struct net_device *dev, u32 handle, u32 chain_index,
+ __be16 proto, struct tc_to_netdev *ntc)
{
if (ntc->type != TC_SETUP_MQPRIO)
return -EINVAL;
@@ -7388,6 +7455,7 @@ static void bnxt_remove_one(struct pci_dev *pdev)
bnxt_clear_int_mode(bp);
bnxt_hwrm_func_drv_unrgtr(bp);
bnxt_free_hwrm_resources(bp);
+ bnxt_free_hwrm_short_cmd_req(bp);
bnxt_ethtool_free(bp);
bnxt_dcb_free(bp);
kfree(bp->edev);
@@ -7542,10 +7610,9 @@ static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
return rc;
}
-static int bnxt_set_dflt_rings(struct bnxt *bp)
+static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
{
int dflt_rings, max_rx_rings, max_tx_rings, rc;
- bool sh = true;
if (sh)
bp->flags |= BNXT_FLAG_SHARED_RINGS;
@@ -7638,6 +7705,12 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
goto init_err_pci_clean;
+ if (bp->flags & BNXT_FLAG_SHORT_CMD) {
+ rc = bnxt_alloc_hwrm_short_cmd_req(bp);
+ if (rc)
+ goto init_err_pci_clean;
+ }
+
rc = bnxt_hwrm_func_reset(bp);
if (rc)
goto init_err_pci_clean;
@@ -7677,8 +7750,10 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
init_waitqueue_head(&bp->sriov_cfg_wait);
#endif
bp->gro_func = bnxt_gro_func_5730x;
- if (BNXT_CHIP_NUM_57X1X(bp->chip_num))
+ if (BNXT_CHIP_P4_PLUS(bp))
bp->gro_func = bnxt_gro_func_5731x;
+ else
+ bp->flags |= BNXT_FLAG_DOUBLE_DB;
rc = bnxt_hwrm_func_drv_rgtr(bp);
if (rc)
@@ -7716,7 +7791,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
bnxt_set_tpa_flags(bp);
bnxt_set_ring_params(bp);
bnxt_set_max_func_irqs(bp, max_irqs);
- rc = bnxt_set_dflt_rings(bp);
+ rc = bnxt_set_dflt_rings(bp, true);
if (rc) {
netdev_err(bp->dev, "Not enough rings available.\n");
rc = -ENOMEM;
@@ -7728,9 +7803,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
- if (!BNXT_CHIP_NUM_57X0X(bp->chip_num) &&
- !BNXT_CHIP_TYPE_NITRO_A0(bp) &&
- bp->hwrm_spec_code >= 0x10501) {
+ if (BNXT_CHIP_P4_PLUS(bp) && bp->hwrm_spec_code >= 0x10501) {
bp->flags |= BNXT_FLAG_UDP_RSS_CAP;
bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
@@ -7802,6 +7875,7 @@ static void bnxt_shutdown(struct pci_dev *pdev)
dev_close(dev);
if (system_state == SYSTEM_POWER_OFF) {
+ bnxt_ulp_shutdown(bp);
bnxt_clear_int_mode(bp);
pci_wake_from_d3(pdev, bp->wol);
pci_set_power_state(pdev, PCI_D3hot);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index d46a85041083..f872a7db2ca8 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -504,6 +504,7 @@ struct rx_tpa_end_cmp_ext {
#define NEXT_CMP(idx) RING_CMP(ADV_RAW_CMP(idx, 1))
#define BNXT_HWRM_MAX_REQ_LEN (bp->hwrm_max_req_len)
+#define BNXT_HWRM_SHORT_REQ_LEN sizeof(struct hwrm_short_input)
#define DFLT_HWRM_CMD_TIMEOUT 500
#define HWRM_CMD_TIMEOUT (bp->hwrm_cmd_timeout)
#define HWRM_RESET_TIMEOUT ((HWRM_CMD_TIMEOUT) * 4)
@@ -941,31 +942,45 @@ struct bnxt {
#define CHIP_NUM_57402 0x16d0
#define CHIP_NUM_57404 0x16d1
#define CHIP_NUM_57406 0x16d2
+#define CHIP_NUM_57407 0x16d5
#define CHIP_NUM_57311 0x16ce
#define CHIP_NUM_57312 0x16cf
#define CHIP_NUM_57314 0x16df
+#define CHIP_NUM_57317 0x16e0
#define CHIP_NUM_57412 0x16d6
#define CHIP_NUM_57414 0x16d7
#define CHIP_NUM_57416 0x16d8
#define CHIP_NUM_57417 0x16d9
+#define CHIP_NUM_57412L 0x16da
+#define CHIP_NUM_57414L 0x16db
+
+#define CHIP_NUM_5745X 0xd730
#define BNXT_CHIP_NUM_5730X(chip_num) \
((chip_num) >= CHIP_NUM_57301 && \
(chip_num) <= CHIP_NUM_57304)
#define BNXT_CHIP_NUM_5740X(chip_num) \
- ((chip_num) >= CHIP_NUM_57402 && \
- (chip_num) <= CHIP_NUM_57406)
+ (((chip_num) >= CHIP_NUM_57402 && \
+ (chip_num) <= CHIP_NUM_57406) || \
+ (chip_num) == CHIP_NUM_57407)
#define BNXT_CHIP_NUM_5731X(chip_num) \
((chip_num) == CHIP_NUM_57311 || \
(chip_num) == CHIP_NUM_57312 || \
- (chip_num) == CHIP_NUM_57314)
+ (chip_num) == CHIP_NUM_57314 || \
+ (chip_num) == CHIP_NUM_57317)
#define BNXT_CHIP_NUM_5741X(chip_num) \
((chip_num) >= CHIP_NUM_57412 && \
- (chip_num) <= CHIP_NUM_57417)
+ (chip_num) <= CHIP_NUM_57414L)
+
+#define BNXT_CHIP_NUM_58700(chip_num) \
+ ((chip_num) == CHIP_NUM_58700)
+
+#define BNXT_CHIP_NUM_5745X(chip_num) \
+ ((chip_num) == CHIP_NUM_5745X)
#define BNXT_CHIP_NUM_57X0X(chip_num) \
(BNXT_CHIP_NUM_5730X(chip_num) || BNXT_CHIP_NUM_5740X(chip_num))
@@ -1010,6 +1025,8 @@ struct bnxt {
#define BNXT_FLAG_RX_PAGE_MODE 0x40000
#define BNXT_FLAG_FW_LLDP_AGENT 0x80000
#define BNXT_FLAG_MULTI_HOST 0x100000
+ #define BNXT_FLAG_SHORT_CMD 0x200000
+ #define BNXT_FLAG_DOUBLE_DB 0x400000
#define BNXT_FLAG_CHIP_NITRO_A0 0x1000000
#define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA | \
@@ -1024,6 +1041,13 @@ struct bnxt {
#define BNXT_CHIP_TYPE_NITRO_A0(bp) ((bp)->flags & BNXT_FLAG_CHIP_NITRO_A0)
#define BNXT_RX_PAGE_MODE(bp) ((bp)->flags & BNXT_FLAG_RX_PAGE_MODE)
+/* Chip class phase 4 and later */
+#define BNXT_CHIP_P4_PLUS(bp) \
+ (BNXT_CHIP_NUM_57X1X((bp)->chip_num) || \
+ BNXT_CHIP_NUM_5745X((bp)->chip_num) || \
+ (BNXT_CHIP_NUM_58700((bp)->chip_num) && \
+ !BNXT_CHIP_TYPE_NITRO_A0(bp)))
+
struct bnxt_en_dev *edev;
struct bnxt_en_dev * (*ulp_probe)(struct net_device *);
@@ -1110,6 +1134,8 @@ struct bnxt {
u32 hwrm_spec_code;
u16 hwrm_cmd_seq;
u32 hwrm_intr_seq_id;
+ void *hwrm_short_cmd_req_addr;
+ dma_addr_t hwrm_short_cmd_req_dma_addr;
void *hwrm_cmd_resp_addr;
dma_addr_t hwrm_cmd_resp_dma_addr;
void *hwrm_dbg_resp_addr;
@@ -1233,6 +1259,14 @@ static inline u32 bnxt_tx_avail(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
((txr->tx_prod - txr->tx_cons) & bp->tx_ring_mask);
}
+/* For TX and RX ring doorbells */
+static inline void bnxt_db_write(struct bnxt *bp, void __iomem *db, u32 val)
+{
+ writel(val, db);
+ if (bp->flags & BNXT_FLAG_DOUBLE_DB)
+ writel(val, db);
+}
+
extern const u16 bnxt_lhint_arr[];
int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
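
bnxt_db_write() centralizes the doorbell convention: pre-P4 chips need the value written twice for it to reliably reach the hardware, so bnxt_init_one() sets BNXT_FLAG_DOUBLE_DB for them, while P4+ parts take a single write. One call now replaces each of the former back-to-back writel() pairs, for example:

        bnxt_db_write(bp, txr->tx_doorbell, DB_KEY_TX | prod);
        bnxt_db_write(bp, rxr->rx_doorbell, DB_KEY_RX | rxr->rx_prod);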
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 11ddf0adc6e1..fd1181510b65 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -2376,8 +2376,7 @@ static int bnxt_run_loopback(struct bnxt *bp)
/* Sync BD data before updating doorbell */
wmb();
- writel(DB_KEY_TX | txr->tx_prod, txr->tx_doorbell);
- writel(DB_KEY_TX | txr->tx_prod, txr->tx_doorbell);
+ bnxt_db_write(bp, txr->tx_doorbell, DB_KEY_TX | txr->tx_prod);
rc = bnxt_poll_loopback(bp, pkt_size);
dma_unmap_single(&bp->pdev->dev, map, pkt_size, PCI_DMA_TODEVICE);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
index 8b7464b76501..77da75a55c02 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -266,6 +266,25 @@ void bnxt_ulp_sriov_cfg(struct bnxt *bp, int num_vfs)
}
}
+void bnxt_ulp_shutdown(struct bnxt *bp)
+{
+ struct bnxt_en_dev *edev = bp->edev;
+ struct bnxt_ulp_ops *ops;
+ int i;
+
+ if (!edev)
+ return;
+
+ for (i = 0; i < BNXT_MAX_ULP; i++) {
+ struct bnxt_ulp *ulp = &edev->ulp_tbl[i];
+
+ ops = rtnl_dereference(ulp->ulp_ops);
+ if (!ops || !ops->ulp_shutdown)
+ continue;
+ ops->ulp_shutdown(ulp->handle);
+ }
+}
+
void bnxt_ulp_async_events(struct bnxt *bp, struct hwrm_async_event_cmpl *cmpl)
{
u16 event_id = le16_to_cpu(cmpl->event_id);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
index 74f816e46a33..d2471067dc37 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
@@ -26,6 +26,7 @@ struct bnxt_ulp_ops {
void (*ulp_stop)(void *);
void (*ulp_start)(void *);
void (*ulp_sriov_config)(void *, int);
+ void (*ulp_shutdown)(void *);
};
struct bnxt_msix_entry {
@@ -87,6 +88,7 @@ void bnxt_subtract_ulp_resources(struct bnxt *bp, int ulp_id);
void bnxt_ulp_stop(struct bnxt *bp);
void bnxt_ulp_start(struct bnxt *bp);
void bnxt_ulp_sriov_cfg(struct bnxt *bp, int num_vfs);
+void bnxt_ulp_shutdown(struct bnxt *bp);
void bnxt_ulp_async_events(struct bnxt *bp, struct hwrm_async_event_cmpl *cmpl);
struct bnxt_en_dev *bnxt_ulp_probe(struct net_device *dev);
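
The new ulp_shutdown hook lets bnxt_en notify upper-layer drivers before the NIC is powered off; bnxt_ulp_shutdown() walks the ULP table and calls the hook where registered. A sketch of how a ULP such as the RDMA driver might wire it up (names hypothetical):

        static void bnxt_re_shutdown(void *handle)
        {
                /* quiesce the RDMA device before the NIC powers down */
        }

        static struct bnxt_ulp_ops bnxt_re_ulp_ops = {
                /* .ulp_stop, .ulp_start, .ulp_sriov_config as before */
                .ulp_shutdown   = bnxt_re_shutdown,
        };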
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index 9dae32756767..7d67552e70d7 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -63,7 +63,7 @@ void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
tx_buf = &txr->tx_buf_ring[last_tx_cons];
rx_prod = tx_buf->rx_prod;
}
- writel(DB_KEY_RX | rx_prod, rxr->rx_doorbell);
+ bnxt_db_write(bp, rxr->rx_doorbell, DB_KEY_RX | rx_prod);
}
/* returns the following:
@@ -218,6 +218,7 @@ int bnxt_xdp(struct net_device *dev, struct netdev_xdp *xdp)
break;
case XDP_QUERY_PROG:
xdp->prog_attached = !!bp->xdp_prog;
+ xdp->prog_id = bp->xdp_prog ? bp->xdp_prog->aux->id : 0;
rc = 0;
break;
default:
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index a205a9ff9e17..daca1c9d254b 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -477,7 +477,9 @@ static int bcmgenet_get_link_ksettings(struct net_device *dev,
if (!priv->phydev)
return -ENODEV;
- return phy_ethtool_ksettings_get(priv->phydev, cmd);
+ phy_ethtool_ksettings_get(priv->phydev, cmd);
+
+ return 0;
}
static int bcmgenet_set_link_ksettings(struct net_device *dev,
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index 285676f8da6b..071fcbd14e6a 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -251,11 +251,8 @@ int bcmgenet_mii_config(struct net_device *dev)
priv->ext_phy = !priv->internal_phy &&
(priv->phy_interface != PHY_INTERFACE_MODE_MOCA);
- if (priv->internal_phy)
- priv->phy_interface = PHY_INTERFACE_MODE_NA;
-
switch (priv->phy_interface) {
- case PHY_INTERFACE_MODE_NA:
+ case PHY_INTERFACE_MODE_INTERNAL:
case PHY_INTERFACE_MODE_MOCA:
/* Irrespective of the actually configured PHY speed (100 or
* 1000) GENETv4 only has an internal GPHY so we will just end
@@ -471,7 +468,6 @@ static int bcmgenet_mii_of_init(struct bcmgenet_priv *priv)
{
struct device_node *dn = priv->pdev->dev.of_node;
struct device *kdev = &priv->pdev->dev;
- const char *phy_mode_str = NULL;
struct phy_device *phydev = NULL;
char *compat;
int phy_mode;
@@ -510,23 +506,19 @@ static int bcmgenet_mii_of_init(struct bcmgenet_priv *priv)
/* Get the link mode */
phy_mode = of_get_phy_mode(dn);
+ if (phy_mode < 0) {
+ dev_err(kdev, "invalid PHY mode property\n");
+ return phy_mode;
+ }
+
priv->phy_interface = phy_mode;
/* We need to specifically look up whether this PHY interface is internal
* or not *before* we even try to probe the PHY driver over MDIO as we
* may have shut down the internal PHY for power saving purposes.
*/
- if (phy_mode < 0) {
- ret = of_property_read_string(dn, "phy-mode", &phy_mode_str);
- if (ret < 0) {
- dev_err(kdev, "invalid PHY mode property\n");
- return ret;
- }
-
- priv->phy_interface = PHY_INTERFACE_MODE_NA;
- if (!strcasecmp(phy_mode_str, "internal"))
- priv->internal_phy = true;
- }
+ if (priv->phy_interface == PHY_INTERFACE_MODE_INTERNAL)
+ priv->internal_phy = true;
/* Make sure we initialize MoCA PHYs with a link down */
if (phy_mode == PHY_INTERFACE_MODE_MOCA) {
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 537d571ee601..d600c41fb1dc 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -12097,7 +12097,9 @@ static int tg3_get_link_ksettings(struct net_device *dev,
if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
return -EAGAIN;
phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
- return phy_ethtool_ksettings_get(phydev, cmd);
+ phy_ethtool_ksettings_get(phydev, cmd);
+
+ return 0;
}
supported = (SUPPORTED_Autoneg);
diff --git a/drivers/net/ethernet/cadence/Kconfig b/drivers/net/ethernet/cadence/Kconfig
index 608bea171956..427d65a1a126 100644
--- a/drivers/net/ethernet/cadence/Kconfig
+++ b/drivers/net/ethernet/cadence/Kconfig
@@ -29,7 +29,15 @@ config MACB
support for the MACB/GEM chip.
To compile this driver as a module, choose M here: the module
- will be called macb.
+ will be macb.
+
+config MACB_USE_HWSTAMP
+ bool "Use IEEE 1588 hwstamp"
+ depends on MACB
+ default y
+ imply PTP_1588_CLOCK
+ ---help---
+ Enable IEEE 1588 Precision Time Protocol (PTP) support for MACB.
config MACB_PCI
tristate "Cadence PCI MACB/GEM support"
diff --git a/drivers/net/ethernet/cadence/Makefile b/drivers/net/ethernet/cadence/Makefile
index 4ba75594d5c5..1d66ddb68969 100644
--- a/drivers/net/ethernet/cadence/Makefile
+++ b/drivers/net/ethernet/cadence/Makefile
@@ -1,6 +1,11 @@
#
# Makefile for the Atmel network device drivers.
#
+macb-y := macb_main.o
+
+ifeq ($(CONFIG_MACB_USE_HWSTAMP),y)
+macb-y += macb_ptp.o
+endif
obj-$(CONFIG_MACB) += macb.o
obj-$(CONFIG_MACB_PCI) += macb_pci.o
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index ec037b0fa2a4..c93f3a2dc6c1 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -11,6 +11,12 @@
#define _MACB_H
#include <linux/phy.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/net_tstamp.h>
+
+#if defined(CONFIG_ARCH_DMA_ADDR_T_64BIT) || defined(CONFIG_MACB_USE_HWSTAMP)
+#define MACB_EXT_DESC
+#endif
#define MACB_GREGS_NBR 16
#define MACB_GREGS_VERSION 2
@@ -86,6 +92,10 @@
#define GEM_SA3T 0x009C /* Specific3 Top */
#define GEM_SA4B 0x00A0 /* Specific4 Bottom */
#define GEM_SA4T 0x00A4 /* Specific4 Top */
+#define GEM_EFTSH 0x00e8 /* PTP Event Frame Transmitted Seconds Register 47:32 */
+#define GEM_EFRSH 0x00ec /* PTP Event Frame Received Seconds Register 47:32 */
+#define GEM_PEFTSH 0x00f0 /* PTP Peer Event Frame Transmitted Seconds Register 47:32 */
+#define GEM_PEFRSH 0x00f4 /* PTP Peer Event Frame Received Seconds Register 47:32 */
#define GEM_OTX 0x0100 /* Octets transmitted */
#define GEM_OCTTXL 0x0100 /* Octets transmitted [31:0] */
#define GEM_OCTTXH 0x0104 /* Octets transmitted [47:32] */
@@ -155,6 +165,9 @@
#define GEM_DCFG6 0x0294 /* Design Config 6 */
#define GEM_DCFG7 0x0298 /* Design Config 7 */
+#define GEM_TXBDCTRL 0x04cc /* TX Buffer Descriptor control register */
+#define GEM_RXBDCTRL 0x04d0 /* RX Buffer Descriptor control register */
+
#define GEM_ISR(hw_q) (0x0400 + ((hw_q) << 2))
#define GEM_TBQP(hw_q) (0x0440 + ((hw_q) << 2))
#define GEM_TBQPH(hw_q) (0x04C8)
@@ -191,6 +204,8 @@
#define MACB_TZQ_OFFSET 12 /* Transmit zero quantum pause frame */
#define MACB_TZQ_SIZE 1
#define MACB_SRTSM_OFFSET 15
+#define MACB_OSSMODE_OFFSET 24 /* Enable One Step Synchro Mode */
+#define MACB_OSSMODE_SIZE 1
/* Bitfields in NCFGR */
#define MACB_SPD_OFFSET 0 /* Speed */
@@ -269,6 +284,10 @@
#define GEM_RXBS_SIZE 8
#define GEM_DDRP_OFFSET 24 /* disc_when_no_ahb */
#define GEM_DDRP_SIZE 1
+#define GEM_RXEXT_OFFSET 28 /* RX extended Buffer Descriptor mode */
+#define GEM_RXEXT_SIZE 1
+#define GEM_TXEXT_OFFSET 29 /* TX extended Buffer Descriptor mode */
+#define GEM_TXEXT_SIZE 1
#define GEM_ADDR64_OFFSET 30 /* Address bus width - 64b or 32b */
#define GEM_ADDR64_SIZE 1
@@ -425,6 +444,11 @@
#define GEM_TX_PKT_BUFF_OFFSET 21
#define GEM_TX_PKT_BUFF_SIZE 1
+
+/* Bitfields in DCFG5. */
+#define GEM_TSU_OFFSET 8
+#define GEM_TSU_SIZE 1
+
/* Bitfields in DCFG6. */
#define GEM_PBUF_LSO_OFFSET 27
#define GEM_PBUF_LSO_SIZE 1
@@ -439,6 +463,52 @@
#define GEM_NSINCR_OFFSET 0
#define GEM_NSINCR_SIZE 8
+/* Bitfields in TSH */
+#define GEM_TSH_OFFSET 0 /* TSU timer value (s). MSB [47:32] of seconds timer count */
+#define GEM_TSH_SIZE 16
+
+/* Bitfields in TSL */
+#define GEM_TSL_OFFSET 0 /* TSU timer value (s). LSB [31:0] of seconds timer count */
+#define GEM_TSL_SIZE 32
+
+/* Bitfields in TN */
+#define GEM_TN_OFFSET 0 /* TSU timer value (ns) */
+#define GEM_TN_SIZE 30
+
+/* Bitfields in TXBDCTRL */
+#define GEM_TXTSMODE_OFFSET 4 /* TX Descriptor Timestamp Insertion mode */
+#define GEM_TXTSMODE_SIZE 2
+
+/* Bitfields in RXBDCTRL */
+#define GEM_RXTSMODE_OFFSET 4 /* RX Descriptor Timestamp Insertion mode */
+#define GEM_RXTSMODE_SIZE 2
+
+/* Transmit DMA buffer descriptor Word 1 */
+#define GEM_DMA_TXVALID_OFFSET 23 /* timestamp has been captured in the Buffer Descriptor */
+#define GEM_DMA_TXVALID_SIZE 1
+
+/* Receive DMA buffer descriptor Word 0 */
+#define GEM_DMA_RXVALID_OFFSET 2 /* indicates a valid timestamp in the Buffer Descriptor */
+#define GEM_DMA_RXVALID_SIZE 1
+
+/* DMA buffer descriptor Word 2 (32 bit addressing) or Word 4 (64 bit addressing) */
+#define GEM_DMA_SECL_OFFSET 30 /* Timestamp seconds[1:0] */
+#define GEM_DMA_SECL_SIZE 2
+#define GEM_DMA_NSEC_OFFSET 0 /* Timestamp nanosecs [29:0] */
+#define GEM_DMA_NSEC_SIZE 30
+
+/* DMA buffer descriptor Word 3 (32 bit addressing) or Word 5 (64 bit addressing) */
+
+/* New hardware supports 12 bit precision of the timestamp in the DMA buffer descriptor.
+ * Old hardware supports only 6 bit precision, but that is enough for PTP.
+ * The lower precision is always used, rather than checking the hardware version.
+ */
+#define GEM_DMA_SECH_OFFSET 0 /* Timestamp seconds[5:2] */
+#define GEM_DMA_SECH_SIZE 4
+#define GEM_DMA_SEC_WIDTH (GEM_DMA_SECH_SIZE + GEM_DMA_SECL_SIZE)
+#define GEM_DMA_SEC_TOP (1 << GEM_DMA_SEC_WIDTH)
+#define GEM_DMA_SEC_MASK (GEM_DMA_SEC_TOP - 1)
+
/* Bitfields in ADJ */
#define GEM_ADDSUB_OFFSET 31
#define GEM_ADDSUB_SIZE 1
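
Only GEM_DMA_SEC_WIDTH (six) bits of the seconds count fit into the buffer descriptor, so the consumer reconstructs the full value by splicing those bits into the current TSU seconds counter and compensating for a wrap between stamping and readout. A sketch using the masks defined above (the real logic lives in macb_ptp.c):

        static u64 gem_tsu_full_seconds(u64 tsu_sec_now, u32 desc_sec)
        {
                u64 sec;

                sec = (tsu_sec_now & ~(u64)GEM_DMA_SEC_MASK) | desc_sec;

                /* the stamp cannot lie in the future: if splicing made it
                 * appear ahead of the TSU, the 6-bit field has wrapped
                 */
                if (sec > tsu_sec_now)
                        sec -= GEM_DMA_SEC_TOP;

                return sec;
        }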
@@ -514,6 +584,8 @@
#define queue_readl(queue, reg) (queue)->bp->macb_reg_readl((queue)->bp, (queue)->reg)
#define queue_writel(queue, reg, value) (queue)->bp->macb_reg_writel((queue)->bp, (queue)->reg, (value))
+#define PTP_TS_BUFFER_SIZE 128 /* must be power of 2 */
+
/* Conditional GEM/MACB macros. These perform the operation to the correct
* register dependent on whether the device is a GEM or a MACB. For registers
* and bitfields that are common across both devices, use macb_{read,write}l
@@ -546,16 +618,26 @@ struct macb_dma_desc {
u32 ctrl;
};
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-enum macb_hw_dma_cap {
- HW_DMA_CAP_32B,
- HW_DMA_CAP_64B,
-};
+#ifdef MACB_EXT_DESC
+#define HW_DMA_CAP_32B 0
+#define HW_DMA_CAP_64B (1 << 0)
+#define HW_DMA_CAP_PTP (1 << 1)
+#define HW_DMA_CAP_64B_PTP (HW_DMA_CAP_64B | HW_DMA_CAP_PTP)
struct macb_dma_desc_64 {
u32 addrh;
u32 resvd;
};
+
+struct macb_dma_desc_ptp {
+ u32 ts_1;
+ u32 ts_2;
+};
+
+struct gem_tx_ts {
+ struct sk_buff *skb;
+ struct macb_dma_desc_ptp desc_ptp;
+};
#endif
/* DMA descriptor bitfields */
@@ -871,6 +953,11 @@ struct macb_config {
int jumbo_max_len;
};
+struct tsu_incr {
+ u32 sub_ns;
+ u32 ns;
+};
+
struct macb_queue {
struct macb *bp;
int irq;
@@ -887,6 +974,12 @@ struct macb_queue {
struct macb_tx_skb *tx_skb;
dma_addr_t tx_ring_dma;
struct work_struct tx_error_task;
+
+#ifdef CONFIG_MACB_USE_HWSTAMP
+ struct work_struct tx_ts_task;
+ unsigned int tx_ts_head, tx_ts_tail;
+ struct gem_tx_ts tx_timestamps[PTP_TS_BUFFER_SIZE];
+#endif
};
struct macb {
@@ -930,6 +1023,7 @@ struct macb {
struct macb_or_gem_ops macbgem_ops;
struct mii_bus *mii_bus;
+ struct device_node *phy_node;
int link;
int speed;
int duplex;
@@ -954,11 +1048,62 @@ struct macb {
u32 wol;
struct macb_ptp_info *ptp_info; /* macb-ptp interface */
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- enum macb_hw_dma_cap hw_dma_cap;
+#ifdef MACB_EXT_DESC
+ uint8_t hw_dma_cap;
#endif
+ spinlock_t tsu_clk_lock; /* gem tsu clock locking */
+ unsigned int tsu_rate;
+ struct ptp_clock *ptp_clock;
+ struct ptp_clock_info ptp_clock_info;
+ struct tsu_incr tsu_incr;
+ struct hwtstamp_config tstamp_config;
};
+#ifdef CONFIG_MACB_USE_HWSTAMP
+#define GEM_TSEC_SIZE (GEM_TSH_SIZE + GEM_TSL_SIZE)
+#define TSU_SEC_MAX_VAL (((u64)1 << GEM_TSEC_SIZE) - 1)
+#define TSU_NSEC_MAX_VAL ((1 << GEM_TN_SIZE) - 1)
+
+enum macb_bd_control {
+ TSTAMP_DISABLED,
+ TSTAMP_FRAME_PTP_EVENT_ONLY,
+ TSTAMP_ALL_PTP_FRAMES,
+ TSTAMP_ALL_FRAMES,
+};
+
+void gem_ptp_init(struct net_device *ndev);
+void gem_ptp_remove(struct net_device *ndev);
+int gem_ptp_txstamp(struct macb_queue *queue, struct sk_buff *skb, struct macb_dma_desc *des);
+void gem_ptp_rxstamp(struct macb *bp, struct sk_buff *skb, struct macb_dma_desc *desc);
+static inline int gem_ptp_do_txstamp(struct macb_queue *queue, struct sk_buff *skb, struct macb_dma_desc *desc)
+{
+ if (queue->bp->tstamp_config.tx_type == TSTAMP_DISABLED)
+ return -ENOTSUPP;
+
+ return gem_ptp_txstamp(queue, skb, desc);
+}
+
+static inline void gem_ptp_do_rxstamp(struct macb *bp, struct sk_buff *skb, struct macb_dma_desc *desc)
+{
+ if (bp->tstamp_config.rx_filter == TSTAMP_DISABLED)
+ return;
+
+ gem_ptp_rxstamp(bp, skb, desc);
+}
+int gem_get_hwtst(struct net_device *dev, struct ifreq *rq);
+int gem_set_hwtst(struct net_device *dev, struct ifreq *ifr, int cmd);
+#else
+static inline void gem_ptp_init(struct net_device *ndev) { }
+static inline void gem_ptp_remove(struct net_device *ndev) { }
+
+static inline int gem_ptp_do_txstamp(struct macb_queue *queue, struct sk_buff *skb, struct macb_dma_desc *desc)
+{
+ return -1;
+}
+
+static inline void gem_ptp_do_rxstamp(struct macb *bp, struct sk_buff *skb, struct macb_dma_desc *desc) { }
+#endif
+
static inline bool macb_is_gem(struct macb *bp)
{
return !!(bp->caps & MACB_CAPS_MACB_IS_GEM);
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb_main.c
index 91f7492623d3..e69ebdd65658 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -79,33 +79,83 @@
#define MACB_HALT_TIMEOUT 1230
/* DMA buffer descriptor might be different size
- * depends on hardware configuration.
+ * depends on hardware configuration:
+ *
+ * 1. dma address width 32 bits:
+ * word 1: 32 bit address of Data Buffer
+ * word 2: control
+ *
+ * 2. dma address width 64 bits:
+ * word 1: 32 bit address of Data Buffer
+ * word 2: control
+ * word 3: upper 32 bit address of Data Buffer
+ * word 4: unused
+ *
+ * 3. dma address width 32 bits with hardware timestamping:
+ * word 1: 32 bit address of Data Buffer
+ * word 2: control
+ * word 3: timestamp word 1
+ * word 4: timestamp word 2
+ *
+ * 4. dma address width 64 bits with hardware timestamping:
+ * word 1: 32 bit address of Data Buffer
+ * word 2: control
+ * word 3: upper 32 bit address of Data Buffer
+ * word 4: unused
+ * word 5: timestamp word 1
+ * word 6: timestamp word 2
*/
static unsigned int macb_dma_desc_get_size(struct macb *bp)
{
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- if (bp->hw_dma_cap == HW_DMA_CAP_64B)
- return sizeof(struct macb_dma_desc) + sizeof(struct macb_dma_desc_64);
+#ifdef MACB_EXT_DESC
+ unsigned int desc_size;
+
+ switch (bp->hw_dma_cap) {
+ case HW_DMA_CAP_64B:
+ desc_size = sizeof(struct macb_dma_desc)
+ + sizeof(struct macb_dma_desc_64);
+ break;
+ case HW_DMA_CAP_PTP:
+ desc_size = sizeof(struct macb_dma_desc)
+ + sizeof(struct macb_dma_desc_ptp);
+ break;
+ case HW_DMA_CAP_64B_PTP:
+ desc_size = sizeof(struct macb_dma_desc)
+ + sizeof(struct macb_dma_desc_64)
+ + sizeof(struct macb_dma_desc_ptp);
+ break;
+ default:
+ desc_size = sizeof(struct macb_dma_desc);
+ }
+ return desc_size;
#endif
return sizeof(struct macb_dma_desc);
}
-static unsigned int macb_adj_dma_desc_idx(struct macb *bp, unsigned int idx)
+static unsigned int macb_adj_dma_desc_idx(struct macb *bp, unsigned int desc_idx)
{
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- /* Dma buffer descriptor is 4 words length (instead of 2 words)
- * for 64b GEM.
- */
- if (bp->hw_dma_cap == HW_DMA_CAP_64B)
- idx <<= 1;
+#ifdef MACB_EXT_DESC
+ switch (bp->hw_dma_cap) {
+ case HW_DMA_CAP_64B:
+ case HW_DMA_CAP_PTP:
+ desc_idx <<= 1;
+ break;
+ case HW_DMA_CAP_64B_PTP:
+ desc_idx *= 3;
+ break;
+ default:
+ break;
+ }
#endif
- return idx;
+ return desc_idx;
}
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
static struct macb_dma_desc_64 *macb_64b_desc(struct macb *bp, struct macb_dma_desc *desc)
{
- return (struct macb_dma_desc_64 *)((void *)desc + sizeof(struct macb_dma_desc));
+ if (bp->hw_dma_cap & HW_DMA_CAP_64B)
+ return (struct macb_dma_desc_64 *)((void *)desc + sizeof(struct macb_dma_desc));
+ return NULL;
}
#endif
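
With the flag-based hw_dma_cap, a logical ring entry spans one base descriptor (two words), two when either the 64-bit address or the PTP extension is enabled (four words), and three when both are (six words). macb_adj_dma_desc_idx() converts a logical index into base-descriptor units so existing array indexing keeps working; entry 5 on a 64-bit + PTP ring, for example, starts at base index 15. A sketch of how the adjusted index is consumed:

        /* locate logical entry "i" in a ring addressed in
         * sizeof(struct macb_dma_desc) units
         */
        static struct macb_dma_desc *macb_desc_at(struct macb *bp,
                                                  struct macb_dma_desc *ring,
                                                  unsigned int i)
        {
                return &ring[macb_adj_dma_desc_idx(bp, i)];
        }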
@@ -425,32 +475,40 @@ static int macb_mii_probe(struct net_device *dev)
int phy_irq;
int ret;
- phydev = phy_find_first(bp->mii_bus);
- if (!phydev) {
- netdev_err(dev, "no PHY found\n");
- return -ENXIO;
- }
+ if (bp->phy_node) {
+ phydev = of_phy_connect(dev, bp->phy_node,
+ &macb_handle_link_change, 0,
+ bp->phy_interface);
+ if (!phydev)
+ return -ENODEV;
+ } else {
+ phydev = phy_find_first(bp->mii_bus);
+ if (!phydev) {
+ netdev_err(dev, "no PHY found\n");
+ return -ENXIO;
+ }
- pdata = dev_get_platdata(&bp->pdev->dev);
- if (pdata) {
- if (gpio_is_valid(pdata->phy_irq_pin)) {
- ret = devm_gpio_request(&bp->pdev->dev,
- pdata->phy_irq_pin, "phy int");
- if (!ret) {
- phy_irq = gpio_to_irq(pdata->phy_irq_pin);
- phydev->irq = (phy_irq < 0) ? PHY_POLL : phy_irq;
+ pdata = dev_get_platdata(&bp->pdev->dev);
+ if (pdata) {
+ if (gpio_is_valid(pdata->phy_irq_pin)) {
+ ret = devm_gpio_request(&bp->pdev->dev,
+ pdata->phy_irq_pin, "phy int");
+ if (!ret) {
+ phy_irq = gpio_to_irq(pdata->phy_irq_pin);
+ phydev->irq = (phy_irq < 0) ? PHY_POLL : phy_irq;
+ }
+ } else {
+ phydev->irq = PHY_POLL;
}
- } else {
- phydev->irq = PHY_POLL;
}
- }
- /* attach the mac to the phy */
- ret = phy_connect_direct(dev, phydev, &macb_handle_link_change,
- bp->phy_interface);
- if (ret) {
- netdev_err(dev, "Could not attach to PHY\n");
- return ret;
+ /* attach the mac to the phy */
+ ret = phy_connect_direct(dev, phydev, &macb_handle_link_change,
+ bp->phy_interface);
+ if (ret) {
+ netdev_err(dev, "Could not attach to PHY\n");
+ return ret;
+ }
}
/* mask with MAC supported features */
@@ -499,26 +557,37 @@ static int macb_mii_init(struct macb *bp)
np = bp->pdev->dev.of_node;
if (np) {
- /* try dt phy registration */
- err = of_mdiobus_register(bp->mii_bus, np);
+ if (of_phy_is_fixed_link(np)) {
+ if (of_phy_register_fixed_link(np) < 0) {
+ dev_err(&bp->pdev->dev,
+ "broken fixed-link specification\n");
+ goto err_out_unregister_bus;
+ }
+ bp->phy_node = of_node_get(np);
- /* fallback to standard phy registration if no phy were
- * found during dt phy registration
- */
- if (!err && !phy_find_first(bp->mii_bus)) {
- for (i = 0; i < PHY_MAX_ADDR; i++) {
- struct phy_device *phydev;
-
- phydev = mdiobus_scan(bp->mii_bus, i);
- if (IS_ERR(phydev) &&
- PTR_ERR(phydev) != -ENODEV) {
- err = PTR_ERR(phydev);
- break;
+ err = mdiobus_register(bp->mii_bus);
+ } else {
+ /* try dt phy registration */
+ err = of_mdiobus_register(bp->mii_bus, np);
+
+ /* fall back to standard PHY registration if no PHY was
+ * found during DT PHY registration
+ */
+ if (!err && !phy_find_first(bp->mii_bus)) {
+ for (i = 0; i < PHY_MAX_ADDR; i++) {
+ struct phy_device *phydev;
+
+ phydev = mdiobus_scan(bp->mii_bus, i);
+ if (IS_ERR(phydev) &&
+ PTR_ERR(phydev) != -ENODEV) {
+ err = PTR_ERR(phydev);
+ break;
+ }
}
- }
- if (err)
- goto err_out_unregister_bus;
+ if (err)
+ goto err_out_unregister_bus;
+ }
}
} else {
for (i = 0; i < PHY_MAX_ADDR; i++)
@@ -602,7 +671,7 @@ static void macb_set_addr(struct macb *bp, struct macb_dma_desc *desc, dma_addr_
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
struct macb_dma_desc_64 *desc_64;
- if (bp->hw_dma_cap == HW_DMA_CAP_64B) {
+ if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
desc_64 = macb_64b_desc(bp, desc);
desc_64->addrh = upper_32_bits(addr);
}
@@ -616,7 +685,7 @@ static dma_addr_t macb_get_addr(struct macb *bp, struct macb_dma_desc *desc)
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
struct macb_dma_desc_64 *desc_64;
- if (bp->hw_dma_cap == HW_DMA_CAP_64B) {
+ if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
desc_64 = macb_64b_desc(bp, desc);
addr = ((u64)(desc_64->addrh) << 32);
}
@@ -715,7 +784,7 @@ static void macb_tx_error_task(struct work_struct *work)
/* Reinitialize the TX desc queue */
queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- if (bp->hw_dma_cap == HW_DMA_CAP_64B)
+ if (bp->hw_dma_cap & HW_DMA_CAP_64B)
queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma));
#endif
/* Make TX ring reflect state of hardware */
@@ -777,6 +846,12 @@ static void macb_tx_interrupt(struct macb_queue *queue)
/* First, update TX stats if needed */
if (skb) {
+ if (gem_ptp_do_txstamp(queue, skb, desc) == 0) {
+ /* skb now belongs to the timestamp buffer
+ * and will be freed from there later
+ */
+ tx_skb->skb = NULL;
+ }
netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n",
macb_tx_ring_wrap(bp, tail),
skb->data);
@@ -943,6 +1018,8 @@ static int gem_rx(struct macb *bp, int budget)
bp->dev->stats.rx_packets++;
bp->dev->stats.rx_bytes += skb->len;
+ gem_ptp_do_rxstamp(bp, skb, desc);
+
#if defined(DEBUG) && defined(VERBOSE_DEBUG)
netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
skb->len, skb->csum);
@@ -1264,7 +1341,6 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
queue_writel(queue, ISR, MACB_BIT(HRESP));
}
-
status = queue_readl(queue, ISR);
}
@@ -1594,7 +1670,6 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* Make newly initialized descriptor visible to hardware */
wmb();
-
skb_tx_timestamp(skb);
macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
@@ -1923,9 +1998,13 @@ static void macb_configure_dma(struct macb *bp)
dmacfg &= ~GEM_BIT(TXCOEN);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- if (bp->hw_dma_cap == HW_DMA_CAP_64B)
+ if (bp->hw_dma_cap & HW_DMA_CAP_64B)
dmacfg |= GEM_BIT(ADDR64);
#endif
+#ifdef CONFIG_MACB_USE_HWSTAMP
+ if (bp->hw_dma_cap & HW_DMA_CAP_PTP)
+ dmacfg |= GEM_BIT(RXEXT) | GEM_BIT(TXEXT);
+#endif
netdev_dbg(bp->dev, "Cadence configure DMA with 0x%08x\n",
dmacfg);
gem_writel(bp, DMACFG, dmacfg);
@@ -1973,13 +2052,13 @@ static void macb_init_hw(struct macb *bp)
/* Initialize TX and RX buffers */
macb_writel(bp, RBQP, lower_32_bits(bp->rx_ring_dma));
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- if (bp->hw_dma_cap == HW_DMA_CAP_64B)
+ if (bp->hw_dma_cap & HW_DMA_CAP_64B)
macb_writel(bp, RBQPH, upper_32_bits(bp->rx_ring_dma));
#endif
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- if (bp->hw_dma_cap == HW_DMA_CAP_64B)
+ if (bp->hw_dma_cap & HW_DMA_CAP_64B)
queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma));
#endif
@@ -2448,6 +2527,70 @@ static int macb_set_ringparam(struct net_device *netdev,
return 0;
}
+#ifdef CONFIG_MACB_USE_HWSTAMP
+static unsigned int gem_get_tsu_rate(struct macb *bp)
+{
+ struct clk *tsu_clk;
+ unsigned int tsu_rate;
+
+ tsu_clk = devm_clk_get(&bp->pdev->dev, "tsu_clk");
+ if (!IS_ERR(tsu_clk)) {
+ tsu_rate = clk_get_rate(tsu_clk);
+ } else if (!IS_ERR(bp->pclk)) {
+ /* try pclk instead */
+ tsu_clk = bp->pclk;
+ tsu_rate = clk_get_rate(tsu_clk);
+ } else {
+ return -ENOTSUPP;
+ }
+ return tsu_rate;
+}
+
+static s32 gem_get_ptp_max_adj(void)
+{
+ return 64000000;
+}
+
+static int gem_get_ts_info(struct net_device *dev,
+ struct ethtool_ts_info *info)
+{
+ struct macb *bp = netdev_priv(dev);
+
+ if ((bp->hw_dma_cap & HW_DMA_CAP_PTP) == 0) {
+ ethtool_op_get_ts_info(dev, info);
+ return 0;
+ }
+
+ info->so_timestamping =
+ SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+ info->tx_types =
+ (1 << HWTSTAMP_TX_ONESTEP_SYNC) |
+ (1 << HWTSTAMP_TX_OFF) |
+ (1 << HWTSTAMP_TX_ON);
+ info->rx_filters =
+ (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_ALL);
+
+ info->phc_index = bp->ptp_clock ? ptp_clock_index(bp->ptp_clock) : -1;
+
+ return 0;
+}
+
+static struct macb_ptp_info gem_ptp_info = {
+ .ptp_init = gem_ptp_init,
+ .ptp_remove = gem_ptp_remove,
+ .get_ptp_max_adj = gem_get_ptp_max_adj,
+ .get_tsu_rate = gem_get_tsu_rate,
+ .get_ts_info = gem_get_ts_info,
+ .get_hwtst = gem_get_hwtst,
+ .set_hwtst = gem_set_hwtst,
+};
+#endif
+
static int macb_get_ts_info(struct net_device *netdev,
struct ethtool_ts_info *info)
{
@@ -2581,6 +2724,16 @@ static void macb_configure_caps(struct macb *bp,
dcfg = gem_readl(bp, DCFG2);
if ((dcfg & (GEM_BIT(RX_PKT_BUFF) | GEM_BIT(TX_PKT_BUFF))) == 0)
bp->caps |= MACB_CAPS_FIFO_MODE;
+#ifdef CONFIG_MACB_USE_HWSTAMP
+ if (gem_has_ptp(bp)) {
+ if (!GEM_BFEXT(TSU, gem_readl(bp, DCFG5))) {
+ pr_err("GEM doesn't support hardware PTP\n");
+ } else {
+ bp->hw_dma_cap |= HW_DMA_CAP_PTP;
+ bp->ptp_info = &gem_ptp_info;
+ }
+ }
+#endif
}
dev_dbg(&bp->pdev->dev, "Cadence caps 0x%08x\n", bp->caps);
@@ -2718,7 +2871,7 @@ static int macb_init(struct platform_device *pdev)
queue->IMR = GEM_IMR(hw_q - 1);
queue->TBQP = GEM_TBQP(hw_q - 1);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- if (bp->hw_dma_cap == HW_DMA_CAP_64B)
+ if (bp->hw_dma_cap & HW_DMA_CAP_64B)
queue->TBQPH = GEM_TBQPH(hw_q - 1);
#endif
} else {
@@ -2729,7 +2882,7 @@ static int macb_init(struct platform_device *pdev)
queue->IMR = MACB_IMR;
queue->TBQP = MACB_TBQP;
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- if (bp->hw_dma_cap == HW_DMA_CAP_64B)
+ if (bp->hw_dma_cap & HW_DMA_CAP_64B)
queue->TBQPH = MACB_TBQPH;
#endif
}
@@ -2992,7 +3145,7 @@ static void at91ether_rx(struct net_device *dev)
skb = netdev_alloc_skb(dev, pktlen + 2);
if (skb) {
skb_reserve(skb, 2);
- memcpy(skb_put(skb, pktlen), p_recv, pktlen);
+ skb_put_data(skb, p_recv, pktlen);
skb->protocol = eth_type_trans(skb, dev);
dev->stats.rx_packets++;
@@ -3186,7 +3339,9 @@ static const struct macb_config np4_config = {
};
static const struct macb_config zynqmp_config = {
- .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_JUMBO,
+ .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE |
+ MACB_CAPS_JUMBO |
+ MACB_CAPS_GEM_HAS_PTP,
.dma_burst_length = 16,
.clk_init = macb_clk_init,
.init = macb_init,
@@ -3220,7 +3375,9 @@ MODULE_DEVICE_TABLE(of, macb_dt_ids);
#endif /* CONFIG_OF */
static const struct macb_config default_gem_config = {
- .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_JUMBO,
+ .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE |
+ MACB_CAPS_JUMBO |
+ MACB_CAPS_GEM_HAS_PTP,
.dma_burst_length = 16,
.clk_init = macb_clk_init,
.init = macb_init,
@@ -3309,19 +3466,17 @@ static int macb_probe(struct platform_device *pdev)
bp->wol |= MACB_WOL_HAS_MAGIC_PACKET;
device_init_wakeup(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET);
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- if (GEM_BFEXT(DAW64, gem_readl(bp, DCFG6))) {
- dma_set_mask(&pdev->dev, DMA_BIT_MASK(44));
- bp->hw_dma_cap = HW_DMA_CAP_64B;
- } else
- bp->hw_dma_cap = HW_DMA_CAP_32B;
-#endif
-
spin_lock_init(&bp->lock);
/* setup capabilities */
macb_configure_caps(bp, macb_config);
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ if (GEM_BFEXT(DAW64, gem_readl(bp, DCFG6))) {
+ dma_set_mask(&pdev->dev, DMA_BIT_MASK(44));
+ bp->hw_dma_cap |= HW_DMA_CAP_64B;
+ }
+#endif
platform_set_drvdata(pdev, dev);
dev->irq = platform_get_irq(pdev, 0);
@@ -3438,6 +3593,7 @@ static int macb_remove(struct platform_device *pdev)
clk_disable_unprepare(bp->hclk);
clk_disable_unprepare(bp->pclk);
clk_disable_unprepare(bp->rx_clk);
+ of_node_put(bp->phy_node);
free_netdev(dev);
}
diff --git a/drivers/net/ethernet/cadence/macb_ptp.c b/drivers/net/ethernet/cadence/macb_ptp.c
new file mode 100644
index 000000000000..67cca08472b7
--- /dev/null
+++ b/drivers/net/ethernet/cadence/macb_ptp.c
@@ -0,0 +1,518 @@
+/*
+ * 1588 PTP support for Cadence GEM device.
+ *
+ * Copyright (C) 2017 Cadence Design Systems - http://www.cadence.com
+ *
+ * Authors: Rafal Ozieblo <rafalo@cadence.com>
+ * Bartosz Folta <bfolta@cadence.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 of
+ * the License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/etherdevice.h>
+#include <linux/platform_device.h>
+#include <linux/time64.h>
+#include <linux/ptp_classify.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/net_tstamp.h>
+#include <linux/circ_buf.h>
+#include <linux/spinlock.h>
+
+#include "macb.h"
+
+#define GEM_PTP_TIMER_NAME "gem-ptp-timer"
+
+static struct macb_dma_desc_ptp *macb_ptp_desc(struct macb *bp,
+ struct macb_dma_desc *desc)
+{
+ if (bp->hw_dma_cap == HW_DMA_CAP_PTP)
+ return (struct macb_dma_desc_ptp *)
+ ((u8 *)desc + sizeof(struct macb_dma_desc));
+ if (bp->hw_dma_cap == HW_DMA_CAP_64B_PTP)
+ return (struct macb_dma_desc_ptp *)
+ ((u8 *)desc + sizeof(struct macb_dma_desc)
+ + sizeof(struct macb_dma_desc_64));
+ return NULL;
+}
+
+static int gem_tsu_get_time(struct ptp_clock_info *ptp, struct timespec64 *ts)
+{
+ struct macb *bp = container_of(ptp, struct macb, ptp_clock_info);
+ unsigned long flags;
+ long first, second;
+ u32 secl, sech;
+
+ spin_lock_irqsave(&bp->tsu_clk_lock, flags);
+ first = gem_readl(bp, TN);
+ secl = gem_readl(bp, TSL);
+ sech = gem_readl(bp, TSH);
+ second = gem_readl(bp, TN);
+
+ /* test for nsec rollover */
+ if (first > second) {
+ /* if so, use later read & re-read seconds
+ * (assume all done within 1s)
+ */
+ ts->tv_nsec = gem_readl(bp, TN);
+ secl = gem_readl(bp, TSL);
+ sech = gem_readl(bp, TSH);
+ } else {
+ ts->tv_nsec = first;
+ }
+
+ spin_unlock_irqrestore(&bp->tsu_clk_lock, flags);
+ ts->tv_sec = (((u64)sech << GEM_TSL_SIZE) | secl)
+ & TSU_SEC_MAX_VAL;
+ return 0;
+}
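
The double read of TN above is the usual retry idiom for sampling a split
seconds/nanoseconds counter that can roll over mid-read. A freestanding
sketch of the same pattern; read_ns()/read_sec() are hypothetical stand-ins
for the TN/TSL/TSH register accesses:

#include <stdint.h>

struct ts_sample { uint64_t sec; uint32_t ns; };

static struct ts_sample read_split_counter(uint32_t (*read_ns)(void),
                                           uint64_t (*read_sec)(void))
{
        struct ts_sample t;
        uint32_t first, second;

        first  = read_ns();             /* ns before the sec reads */
        t.sec  = read_sec();
        second = read_ns();             /* ns after the sec reads */

        if (first > second) {
                /* ns wrapped in between, so sec may be stale: re-read
                 * both, assuming the whole sequence completes within 1s */
                t.ns  = read_ns();
                t.sec = read_sec();
        } else {
                t.ns = first;
        }
        return t;
}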
+
+static int gem_tsu_set_time(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct macb *bp = container_of(ptp, struct macb, ptp_clock_info);
+ unsigned long flags;
+ u32 ns, sech, secl;
+
+ secl = (u32)ts->tv_sec;
+ sech = (ts->tv_sec >> GEM_TSL_SIZE) & ((1 << GEM_TSH_SIZE) - 1);
+ ns = ts->tv_nsec;
+
+ spin_lock_irqsave(&bp->tsu_clk_lock, flags);
+
+ /* TSH doesn't latch the time, so the sec/ns update is not atomic */
+ gem_writel(bp, TN, 0); /* clear to avoid overflow */
+ gem_writel(bp, TSH, sech);
+ /* write lower bits 2nd, for synchronized secs update */
+ gem_writel(bp, TSL, secl);
+ gem_writel(bp, TN, ns);
+
+ spin_unlock_irqrestore(&bp->tsu_clk_lock, flags);
+
+ return 0;
+}
+
+static int gem_tsu_incr_set(struct macb *bp, struct tsu_incr *incr_spec)
+{
+ unsigned long flags;
+
+ /* The tsu_timer_incr register must be written after
+ * the tsu_timer_incr_sub_ns register; the write to
+ * tsu_timer_incr is what makes the value previously written
+ * to tsu_timer_incr_sub_ns take effect.
+ */
+ spin_lock_irqsave(&bp->tsu_clk_lock, flags);
+ gem_writel(bp, TISUBN, GEM_BF(SUBNSINCR, incr_spec->sub_ns));
+ gem_writel(bp, TI, GEM_BF(NSINCR, incr_spec->ns));
+ spin_unlock_irqrestore(&bp->tsu_clk_lock, flags);
+
+ return 0;
+}
+
+static int gem_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+{
+ struct macb *bp = container_of(ptp, struct macb, ptp_clock_info);
+ struct tsu_incr incr_spec;
+ bool neg_adj = false;
+ u32 word;
+ u64 adj;
+
+ if (scaled_ppm < 0) {
+ neg_adj = true;
+ scaled_ppm = -scaled_ppm;
+ }
+
+ /* Adjustment is relative to base frequency */
+ incr_spec.sub_ns = bp->tsu_incr.sub_ns;
+ incr_spec.ns = bp->tsu_incr.ns;
+
+ /* scaling: unused(8bit) | ns(8bit) | fractions(16bit) */
+ word = ((u64)incr_spec.ns << GEM_SUBNSINCR_SIZE) + incr_spec.sub_ns;
+ adj = (u64)scaled_ppm * word;
+ /* Divide with rounding, equivalent to the floating-point
+ * expression floor(adj / USEC_PER_SEC + 0.5)
+ */
+ adj += (USEC_PER_SEC >> 1);
+ adj >>= GEM_SUBNSINCR_SIZE; /* remove fractions */
+ adj = div_u64(adj, USEC_PER_SEC);
+ adj = neg_adj ? (word - adj) : (word + adj);
+
+ incr_spec.ns = (adj >> GEM_SUBNSINCR_SIZE)
+ & ((1 << GEM_NSINCR_SIZE) - 1);
+ incr_spec.sub_ns = adj & ((1 << GEM_SUBNSINCR_SIZE) - 1);
+ gem_tsu_incr_set(bp, &incr_spec);
+ return 0;
+}
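
Per the adjfine contract, scaled_ppm is ppm with a 16-bit binary fraction,
and word is the tick increment in 2^-16 ns units, so the applied delta is
word * scaled_ppm / (2^16 * 10^6). A standalone rework of the arithmetic
with a worked value; the 250 MHz TSU (a 4.0 ns increment) and the +500 ppm
request are assumed examples, not values from the patch:

#include <stdint.h>
#include <stdio.h>

#define FRAC_BITS 16    /* plays the role of GEM_SUBNSINCR_SIZE */

static uint32_t apply_scaled_ppm(uint32_t word, long scaled_ppm)
{
        int neg = scaled_ppm < 0;
        uint64_t adj;

        if (neg)
                scaled_ppm = -scaled_ppm;
        adj = (uint64_t)scaled_ppm * word;  /* incr * ppm * 2^32 */
        adj += 1000000ULL >> 1;             /* rounding bias, as in the patch */
        adj >>= FRAC_BITS;                  /* drop scaled_ppm's fraction */
        adj /= 1000000ULL;                  /* ppm -> absolute delta */
        return neg ? word - (uint32_t)adj : word + (uint32_t)adj;
}

int main(void)
{
        uint32_t word = 4 << FRAC_BITS;     /* 4.0 ns tick, 250 MHz TSU */
        uint32_t w = apply_scaled_ppm(word, 500L << FRAC_BITS); /* +500 ppm */

        /* prints "ns=4 sub_ns=131": 4 + 131/65536 ns, ~4.002 ns per tick */
        printf("ns=%u sub_ns=%u\n", w >> FRAC_BITS, w & ((1u << FRAC_BITS) - 1));
        return 0;
}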
+
+static int gem_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct macb *bp = container_of(ptp, struct macb, ptp_clock_info);
+ struct timespec64 now, then = ns_to_timespec64(delta);
+ u32 adj, sign = 0;
+
+ if (delta < 0) {
+ sign = 1;
+ delta = -delta;
+ }
+
+ if (delta > TSU_NSEC_MAX_VAL) {
+ gem_tsu_get_time(&bp->ptp_clock_info, &now);
+ if (sign)
+ now = timespec64_sub(now, then);
+ else
+ now = timespec64_add(now, then);
+
+ gem_tsu_set_time(&bp->ptp_clock_info,
+ (const struct timespec64 *)&now);
+ } else {
+ adj = (sign << GEM_ADDSUB_OFFSET) | delta;
+
+ gem_writel(bp, TA, adj);
+ }
+
+ return 0;
+}
+
+static int gem_ptp_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ return -EOPNOTSUPP;
+}
+
+static struct ptp_clock_info gem_ptp_caps_template = {
+ .owner = THIS_MODULE,
+ .name = GEM_PTP_TIMER_NAME,
+ .max_adj = 0,
+ .n_alarm = 0,
+ .n_ext_ts = 0,
+ .n_per_out = 0,
+ .n_pins = 0,
+ .pps = 1,
+ .adjfine = gem_ptp_adjfine,
+ .adjtime = gem_ptp_adjtime,
+ .gettime64 = gem_tsu_get_time,
+ .settime64 = gem_tsu_set_time,
+ .enable = gem_ptp_enable,
+};
+
+static void gem_ptp_init_timer(struct macb *bp)
+{
+ u32 rem = 0;
+ u64 adj;
+
+ bp->tsu_incr.ns = div_u64_rem(NSEC_PER_SEC, bp->tsu_rate, &rem);
+ if (rem) {
+ adj = rem;
+ adj <<= GEM_SUBNSINCR_SIZE;
+ bp->tsu_incr.sub_ns = div_u64(adj, bp->tsu_rate);
+ } else {
+ bp->tsu_incr.sub_ns = 0;
+ }
+}
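
The split computed above can be checked by hand. A minimal sketch, assuming
an illustrative 300 MHz TSU clock (not a rate taken from the patch):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t rate = 300000000;              /* assumed 300 MHz TSU */
        uint32_t ns = 1000000000u / rate;       /* 3 whole ns per tick */
        uint32_t rem = 1000000000u % rate;      /* 100000000 */
        uint32_t sub_ns = (uint32_t)(((uint64_t)rem << 16) / rate);

        /* prints "ns=3 sub_ns=21845": 3 + 21845/65536 = ~3.333328 ns; the
         * truncation leaves roughly 1.5 ppm of error for .adjfine to trim */
        printf("ns=%u sub_ns=%u\n", ns, sub_ns);
        return 0;
}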
+
+static void gem_ptp_init_tsu(struct macb *bp)
+{
+ struct timespec64 ts;
+
+ /* 1. get current system time */
+ ts = ns_to_timespec64(ktime_to_ns(ktime_get_real()));
+
+ /* 2. set ptp timer */
+ gem_tsu_set_time(&bp->ptp_clock_info, &ts);
+
+ /* 3. set PTP timer increment value to BASE_INCREMENT */
+ gem_tsu_incr_set(bp, &bp->tsu_incr);
+
+ gem_writel(bp, TA, 0);
+}
+
+static void gem_ptp_clear_timer(struct macb *bp)
+{
+ bp->tsu_incr.sub_ns = 0;
+ bp->tsu_incr.ns = 0;
+
+ gem_writel(bp, TISUBN, GEM_BF(SUBNSINCR, 0));
+ gem_writel(bp, TI, GEM_BF(NSINCR, 0));
+ gem_writel(bp, TA, 0);
+}
+
+static int gem_hw_timestamp(struct macb *bp, u32 dma_desc_ts_1,
+ u32 dma_desc_ts_2, struct timespec64 *ts)
+{
+ struct timespec64 tsu;
+
+ ts->tv_sec = (GEM_BFEXT(DMA_SECH, dma_desc_ts_2) << GEM_DMA_SECL_SIZE) |
+ GEM_BFEXT(DMA_SECL, dma_desc_ts_1);
+ ts->tv_nsec = GEM_BFEXT(DMA_NSEC, dma_desc_ts_1);
+
+ /* TSU overlap workaround:
+ * the descriptor timestamp carries only the lower few bits of
+ * the seconds field, so merge in the upper bits from the 1588 timer
+ */
+ gem_tsu_get_time(&bp->ptp_clock_info, &tsu);
+
+ /* If the top bit is set in the timestamp,
+ * but not in 1588 timer, it has rolled over,
+ * so subtract max size
+ */
+ if ((ts->tv_sec & (GEM_DMA_SEC_TOP >> 1)) &&
+ !(tsu.tv_sec & (GEM_DMA_SEC_TOP >> 1)))
+ ts->tv_sec -= GEM_DMA_SEC_TOP;
+
+ ts->tv_sec += ((~GEM_DMA_SEC_MASK) & tsu.tv_sec);
+
+ return 0;
+}
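
The rollover handling above restated as a small pure function, for clarity;
'bits' stands in for the width of the descriptor's seconds field (the
GEM_DMA_SEC_* definitions live in macb.h and are not shown in this hunk):

#include <stdint.h>

static int64_t rebuild_seconds(uint64_t desc_sec, uint64_t tsu_sec,
                               unsigned int bits)
{
        uint64_t top  = 1ULL << bits;   /* role of GEM_DMA_SEC_TOP */
        uint64_t mask = top - 1;        /* role of GEM_DMA_SEC_MASK */
        int64_t sec = (int64_t)(desc_sec & mask);

        /* Top bit set in the truncated field but clear in the TSU copy
         * means the field rolled over after the frame was stamped. */
        if ((sec & (int64_t)(top >> 1)) && !(tsu_sec & (top >> 1)))
                sec -= (int64_t)top;

        /* Merge in the seconds bits the descriptor could not carry. */
        return sec + (int64_t)(~mask & tsu_sec);
}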
+
+void gem_ptp_rxstamp(struct macb *bp, struct sk_buff *skb,
+ struct macb_dma_desc *desc)
+{
+ struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
+ struct macb_dma_desc_ptp *desc_ptp;
+ struct timespec64 ts;
+
+ if (GEM_BFEXT(DMA_RXVALID, desc->addr)) {
+ desc_ptp = macb_ptp_desc(bp, desc);
+ gem_hw_timestamp(bp, desc_ptp->ts_1, desc_ptp->ts_2, &ts);
+ memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
+ shhwtstamps->hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
+ }
+}
+
+static void gem_tstamp_tx(struct macb *bp, struct sk_buff *skb,
+ struct macb_dma_desc_ptp *desc_ptp)
+{
+ struct skb_shared_hwtstamps shhwtstamps;
+ struct timespec64 ts;
+
+ gem_hw_timestamp(bp, desc_ptp->ts_1, desc_ptp->ts_2, &ts);
+ memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+ shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
+ skb_tstamp_tx(skb, &shhwtstamps);
+}
+
+int gem_ptp_txstamp(struct macb_queue *queue, struct sk_buff *skb,
+ struct macb_dma_desc *desc)
+{
+ unsigned long tail = READ_ONCE(queue->tx_ts_tail);
+ unsigned long head = queue->tx_ts_head;
+ struct macb_dma_desc_ptp *desc_ptp;
+ struct gem_tx_ts *tx_timestamp;
+
+ if (!GEM_BFEXT(DMA_TXVALID, desc->ctrl))
+ return -EINVAL;
+
+ if (CIRC_SPACE(head, tail, PTP_TS_BUFFER_SIZE) == 0)
+ return -ENOMEM;
+
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ desc_ptp = macb_ptp_desc(queue->bp, desc);
+ tx_timestamp = &queue->tx_timestamps[head];
+ tx_timestamp->skb = skb;
+ tx_timestamp->desc_ptp.ts_1 = desc_ptp->ts_1;
+ tx_timestamp->desc_ptp.ts_2 = desc_ptp->ts_2;
+ /* move head */
+ smp_store_release(&queue->tx_ts_head,
+ (head + 1) & (PTP_TS_BUFFER_SIZE - 1));
+
+ schedule_work(&queue->tx_ts_task);
+ return 0;
+}
+
+static void gem_tx_timestamp_flush(struct work_struct *work)
+{
+ struct macb_queue *queue =
+ container_of(work, struct macb_queue, tx_ts_task);
+ unsigned long head, tail;
+ struct gem_tx_ts *tx_ts;
+
+ /* take current head */
+ head = smp_load_acquire(&queue->tx_ts_head);
+ tail = queue->tx_ts_tail;
+
+ while (CIRC_CNT(head, tail, PTP_TS_BUFFER_SIZE)) {
+ tx_ts = &queue->tx_timestamps[tail];
+ gem_tstamp_tx(queue->bp, tx_ts->skb, &tx_ts->desc_ptp);
+ /* cleanup */
+ dev_kfree_skb_any(tx_ts->skb);
+ /* remove old tail */
+ smp_store_release(&queue->tx_ts_tail,
+ (tail + 1) & (PTP_TS_BUFFER_SIZE - 1));
+ tail = queue->tx_ts_tail;
+ }
+}
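
The head/tail handshake above (smp_store_release() in the stamping path,
smp_load_acquire() in the worker) is the standard single-producer,
single-consumer ring. A freestanding sketch of the same discipline using
C11 atomics in place of the kernel barriers:

#include <stdatomic.h>
#include <stddef.h>

#define RING_SIZE 32    /* power of two, like PTP_TS_BUFFER_SIZE */

struct spsc_ring {
        _Atomic unsigned int head;      /* producer-owned (TX completion) */
        _Atomic unsigned int tail;      /* consumer-owned (workqueue) */
        void *slot[RING_SIZE];
};

/* Producer: fill the slot first, then publish the new head with release
 * semantics so the consumer is guaranteed to see the slot contents. */
static int spsc_push(struct spsc_ring *r, void *item)
{
        unsigned int head = atomic_load_explicit(&r->head, memory_order_relaxed);
        unsigned int tail = atomic_load_explicit(&r->tail, memory_order_acquire);
        unsigned int next = (head + 1) & (RING_SIZE - 1);

        if (next == tail)
                return -1;      /* full */
        r->slot[head] = item;
        atomic_store_explicit(&r->head, next, memory_order_release);
        return 0;
}

/* Consumer: acquire the head (pairs with the producer's release), drain
 * one slot, then retire it by publishing the new tail. */
static void *spsc_pop(struct spsc_ring *r)
{
        unsigned int head = atomic_load_explicit(&r->head, memory_order_acquire);
        unsigned int tail = atomic_load_explicit(&r->tail, memory_order_relaxed);
        void *item;

        if (head == tail)
                return NULL;    /* empty */
        item = r->slot[tail];
        atomic_store_explicit(&r->tail, (tail + 1) & (RING_SIZE - 1),
                              memory_order_release);
        return item;
}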
+
+void gem_ptp_init(struct net_device *dev)
+{
+ struct macb *bp = netdev_priv(dev);
+ struct macb_queue *queue;
+ unsigned int q;
+
+ bp->ptp_clock_info = gem_ptp_caps_template;
+
+ /* nominal frequency and maximum adjustment in ppb */
+ bp->tsu_rate = bp->ptp_info->get_tsu_rate(bp);
+ bp->ptp_clock_info.max_adj = bp->ptp_info->get_ptp_max_adj();
+ gem_ptp_init_timer(bp);
+ bp->ptp_clock = ptp_clock_register(&bp->ptp_clock_info, &dev->dev);
+ if (IS_ERR(bp->ptp_clock)) {
+ pr_err("ptp clock register failed: %ld\n",
+ PTR_ERR(bp->ptp_clock));
+ bp->ptp_clock = NULL;
+ return;
+ } else if (bp->ptp_clock == NULL) {
+ pr_err("ptp clock register failed\n");
+ return;
+ }
+
+ spin_lock_init(&bp->tsu_clk_lock);
+ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
+ queue->tx_ts_head = 0;
+ queue->tx_ts_tail = 0;
+ INIT_WORK(&queue->tx_ts_task, gem_tx_timestamp_flush);
+ }
+
+ gem_ptp_init_tsu(bp);
+
+ dev_info(&bp->pdev->dev, "%s ptp clock registered.\n",
+ GEM_PTP_TIMER_NAME);
+}
+
+void gem_ptp_remove(struct net_device *ndev)
+{
+ struct macb *bp = netdev_priv(ndev);
+
+ if (bp->ptp_clock)
+ ptp_clock_unregister(bp->ptp_clock);
+
+ gem_ptp_clear_timer(bp);
+
+ dev_info(&bp->pdev->dev, "%s ptp clock unregistered.\n",
+ GEM_PTP_TIMER_NAME);
+}
+
+static int gem_ptp_set_ts_mode(struct macb *bp,
+ enum macb_bd_control tx_bd_control,
+ enum macb_bd_control rx_bd_control)
+{
+ gem_writel(bp, TXBDCTRL, GEM_BF(TXTSMODE, tx_bd_control));
+ gem_writel(bp, RXBDCTRL, GEM_BF(RXTSMODE, rx_bd_control));
+
+ return 0;
+}
+
+int gem_get_hwtst(struct net_device *dev, struct ifreq *rq)
+{
+ struct hwtstamp_config *tstamp_config;
+ struct macb *bp = netdev_priv(dev);
+
+ tstamp_config = &bp->tstamp_config;
+ if ((bp->hw_dma_cap & HW_DMA_CAP_PTP) == 0)
+ return -EOPNOTSUPP;
+
+ if (copy_to_user(rq->ifr_data, tstamp_config, sizeof(*tstamp_config)))
+ return -EFAULT;
+ else
+ return 0;
+}
+
+static int gem_ptp_set_one_step_sync(struct macb *bp, u8 enable)
+{
+ u32 reg_val;
+
+ reg_val = macb_readl(bp, NCR);
+
+ if (enable)
+ macb_writel(bp, NCR, reg_val | MACB_BIT(OSSMODE));
+ else
+ macb_writel(bp, NCR, reg_val & ~MACB_BIT(OSSMODE));
+
+ return 0;
+}
+
+int gem_set_hwtst(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ enum macb_bd_control tx_bd_control = TSTAMP_DISABLED;
+ enum macb_bd_control rx_bd_control = TSTAMP_DISABLED;
+ struct hwtstamp_config *tstamp_config;
+ struct macb *bp = netdev_priv(dev);
+ u32 regval;
+
+ tstamp_config = &bp->tstamp_config;
+ if ((bp->hw_dma_cap & HW_DMA_CAP_PTP) == 0)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(tstamp_config, ifr->ifr_data,
+ sizeof(*tstamp_config)))
+ return -EFAULT;
+
+ /* reserved for future extensions */
+ if (tstamp_config->flags)
+ return -EINVAL;
+
+ switch (tstamp_config->tx_type) {
+ case HWTSTAMP_TX_OFF:
+ break;
+ case HWTSTAMP_TX_ONESTEP_SYNC:
+ if (gem_ptp_set_one_step_sync(bp, 1) != 0)
+ return -ERANGE;
+ /* fall through: one-step sync also enables TX timestamping */
+ case HWTSTAMP_TX_ON:
+ tx_bd_control = TSTAMP_ALL_FRAMES;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (tstamp_config->rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ rx_bd_control = TSTAMP_ALL_PTP_FRAMES;
+ tstamp_config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ regval = macb_readl(bp, NCR);
+ macb_writel(bp, NCR, (regval | MACB_BIT(SRTSM)));
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_ALL:
+ rx_bd_control = TSTAMP_ALL_FRAMES;
+ tstamp_config->rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+ default:
+ tstamp_config->rx_filter = HWTSTAMP_FILTER_NONE;
+ return -ERANGE;
+ }
+
+ if (gem_ptp_set_ts_mode(bp, tx_bd_control, rx_bd_control) != 0)
+ return -ERANGE;
+
+ if (copy_to_user(ifr->ifr_data, tstamp_config, sizeof(*tstamp_config)))
+ return -EFAULT;
+ else
+ return 0;
+}
+
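
From user space these handlers are reached through the standard
SIOCSHWTSTAMP ioctl. A minimal sketch; "eth0" is an assumed interface name,
and note that the driver writes back what it actually granted (e.g. any PTP
v2 request above is answered with HWTSTAMP_FILTER_PTP_V2_EVENT):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/sockios.h>
#include <linux/net_tstamp.h>

int main(void)
{
        struct hwtstamp_config cfg;
        struct ifreq ifr;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0) {
                perror("socket");
                return 1;
        }

        memset(&cfg, 0, sizeof(cfg));
        cfg.tx_type = HWTSTAMP_TX_ON;
        cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", sizeof(ifr.ifr_name) - 1);
        ifr.ifr_data = (char *)&cfg;

        if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
                perror("SIOCSHWTSTAMP");
        else
                printf("granted rx_filter=%d tx_type=%d\n",
                       cfg.rx_filter, cfg.tx_type);
        close(fd);
        return 0;
}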
diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
index 962dcbcef8b5..6081c3132135 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
@@ -493,9 +493,8 @@ static void cn23xx_pf_setup_global_output_regs(struct octeon_device *oct)
for (q_no = srn; q_no < ern; q_no++) {
reg_val = octeon_read_csr(oct, CN23XX_SLI_OQ_PKT_CONTROL(q_no));
- /* set IPTR & DPTR */
- reg_val |=
- (CN23XX_PKT_OUTPUT_CTL_IPTR | CN23XX_PKT_OUTPUT_CTL_DPTR);
+ /* set DPTR */
+ reg_val |= CN23XX_PKT_OUTPUT_CTL_DPTR;
/* reset BMODE */
reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_BMODE);
@@ -638,7 +637,7 @@ static void cn23xx_setup_oq_regs(struct octeon_device *oct, u32 oq_no)
octeon_write_csr(oct, CN23XX_SLI_OQ_SIZE(oq_no), droq->max_count);
octeon_write_csr(oct, CN23XX_SLI_OQ_BUFF_INFO_SIZE(oq_no),
- (droq->buffer_size | (OCT_RH_SIZE << 16)));
+ droq->buffer_size);
/* Get the mapped address of the pkt_sent and pkts_credit regs */
droq->pkts_sent_reg =
@@ -1343,8 +1342,7 @@ int validate_cn23xx_pf_config_info(struct octeon_device *oct,
return 1;
}
- if (!(CFG_GET_OQ_INFO_PTR(conf23xx)) ||
- !(CFG_GET_OQ_REFILL_THRESHOLD(conf23xx))) {
+ if (!CFG_GET_OQ_REFILL_THRESHOLD(conf23xx)) {
dev_err(&oct->pci_dev->dev, "%s: Invalid parameter for OQ\n",
__func__);
return 1;
diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c b/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c
index b6117b6a1de2..9338a0008378 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c
@@ -165,9 +165,8 @@ static void cn23xx_vf_setup_global_output_regs(struct octeon_device *oct)
reg_val =
octeon_read_csr(oct, CN23XX_VF_SLI_OQ_PKT_CONTROL(q_no));
- /* set IPTR & DPTR */
- reg_val |=
- (CN23XX_PKT_OUTPUT_CTL_IPTR | CN23XX_PKT_OUTPUT_CTL_DPTR);
+ /* set DPTR */
+ reg_val |= CN23XX_PKT_OUTPUT_CTL_DPTR;
/* reset BMODE */
reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_BMODE);
@@ -249,7 +248,7 @@ static void cn23xx_setup_vf_oq_regs(struct octeon_device *oct, u32 oq_no)
octeon_write_csr(oct, CN23XX_VF_SLI_OQ_SIZE(oq_no), droq->max_count);
octeon_write_csr(oct, CN23XX_VF_SLI_OQ_BUFF_INFO_SIZE(oq_no),
- (droq->buffer_size | (OCT_RH_SIZE << 16)));
+ droq->buffer_size);
/* Get the mapped address of the pkt_sent and pkts_credit regs */
droq->pkts_sent_reg =
@@ -431,11 +430,6 @@ int cn23xx_octeon_pfvf_handshake(struct octeon_device *oct)
mbox_cmd.fn = (octeon_mbox_callback_t)octeon_pfvf_hs_callback;
mbox_cmd.fn_arg = &status;
- /* Interrupts are not enabled at this point.
- * Enable them with default oq ticks
- */
- oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR);
-
octeon_mbox_write(oct, &mbox_cmd);
atomic_set(&status, 0);
@@ -444,11 +438,6 @@ int cn23xx_octeon_pfvf_handshake(struct octeon_device *oct)
schedule_timeout_uninterruptible(1);
} while ((!atomic_read(&status)) && (count++ < 100000));
- /* Disable the interrupt so that the interrupsts will be reenabled
- * with the oq ticks received from the PF
- */
- oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
-
ret = atomic_read(&status);
if (!ret) {
dev_err(&oct->pci_dev->dev, "octeon_pfvf_handshake timeout\n");
diff --git a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c
index bdec051107a6..b28253c96d97 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c
@@ -209,9 +209,6 @@ void lio_cn6xxx_setup_global_output_regs(struct octeon_device *oct)
octeon_write_csr64(oct, CN6XXX_SLI_OQ_WMARK, 0);
}
- /* / Select Info Ptr for length & data */
- octeon_write_csr(oct, CN6XXX_SLI_PKT_IPTR, 0xFFFFFFFF);
-
/* / Select Packet count instead of bytes for SLI_PKTi_CNTS[CNT] */
octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_BMODE, 0);
@@ -314,7 +311,7 @@ void lio_cn6xxx_setup_oq_regs(struct octeon_device *oct, u32 oq_no)
octeon_write_csr(oct, CN6XXX_SLI_OQ_SIZE(oq_no), droq->max_count);
octeon_write_csr(oct, CN6XXX_SLI_OQ_BUFF_INFO_SIZE(oq_no),
- (droq->buffer_size | (OCT_RH_SIZE << 16)));
+ droq->buffer_size);
/* Get the mapped address of the pkt_sent and pkts_credit regs */
droq->pkts_sent_reg =
@@ -734,8 +731,7 @@ int lio_validate_cn6xxx_config_info(struct octeon_device *oct,
__func__);
return 1;
}
- if (!(CFG_GET_OQ_INFO_PTR(conf6xxx)) ||
- !(CFG_GET_OQ_REFILL_THRESHOLD(conf6xxx))) {
+ if (!CFG_GET_OQ_REFILL_THRESHOLD(conf6xxx)) {
dev_err(&oct->pci_dev->dev, "%s: Invalid parameter for OQ\n",
__func__);
return 1;
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_core.c b/drivers/net/ethernet/cavium/liquidio/lio_core.c
index 796c2cbc11f6..adde7745d069 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_core.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_core.c
@@ -202,9 +202,13 @@ void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr)
netdev->name);
break;
- case OCTNET_CMD_ENABLE_VLAN_FILTER:
- dev_info(&oct->pci_dev->dev, "%s VLAN filter enabled\n",
- netdev->name);
+ case OCTNET_CMD_VLAN_FILTER_CTL:
+ if (nctrl->ncmd.s.param1)
+ dev_info(&oct->pci_dev->dev,
+ "%s VLAN filter enabled\n", netdev->name);
+ else
+ dev_info(&oct->pci_dev->dev,
+ "%s VLAN filter disabled\n", netdev->name);
break;
case OCTNET_CMD_ADD_VLAN_FILTER:
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
index 579dc7336f58..28ecda3d3404 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
@@ -700,6 +700,13 @@ static void lio_set_msglevel(struct net_device *netdev, u32 msglvl)
lio->msg_enable = msglvl;
}
+static void lio_vf_set_msglevel(struct net_device *netdev, u32 msglvl)
+{
+ struct lio *lio = GET_LIO(netdev);
+
+ lio->msg_enable = msglvl;
+}
+
static void
lio_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
{
@@ -984,11 +991,11 @@ lio_get_ethtool_stats(struct net_device *netdev,
data[i++] =
CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_posted);
/*# of instructions processed */
- data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->
- stats.instr_processed);
+ data[i++] = CVM_CAST64(
+ oct_dev->instr_queue[j]->stats.instr_processed);
/*# of instructions could not be processed */
- data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->
- stats.instr_dropped);
+ data[i++] = CVM_CAST64(
+ oct_dev->instr_queue[j]->stats.instr_dropped);
/*bytes sent through the queue */
data[i++] =
CVM_CAST64(oct_dev->instr_queue[j]->stats.bytes_sent);
@@ -1801,7 +1808,7 @@ oct_cfg_rx_intrcnt(struct lio *lio,
(octeon_read_csr64(
oct, CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(q_no)) &
(0x3fffff00000000UL)) |
- rx_max_coalesced_frames);
+ (rx_max_coalesced_frames - 1));
/*consider writing to resend bit here*/
}
intrmod->rx_frames = rx_max_coalesced_frames;
@@ -2611,7 +2618,7 @@ static const struct ethtool_ops lio_vf_ethtool_ops = {
.get_regs_len = lio_get_regs_len,
.get_regs = lio_get_regs,
.get_msglevel = lio_get_msglevel,
- .set_msglevel = lio_set_msglevel,
+ .set_msglevel = lio_vf_set_msglevel,
.get_sset_count = lio_vf_get_sset_count,
.get_coalesce = lio_get_intr_coalesce,
.set_coalesce = lio_set_intr_coalesce,
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index 927617cbf6a9..51583ae4b1eb 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -1421,7 +1421,7 @@ static bool fw_type_is_none(void)
*/
static void octeon_destroy_resources(struct octeon_device *oct)
{
- int i;
+ int i, refcount;
struct msix_entry *msix_entries;
struct octeon_device_priv *oct_priv =
(struct octeon_device_priv *)oct->priv;
@@ -1556,10 +1556,14 @@ static void octeon_destroy_resources(struct octeon_device *oct)
/* fallthrough */
case OCT_DEV_PCI_MAP_DONE:
+ refcount = octeon_deregister_device(oct);
+
if (!fw_type_is_none()) {
- /* Soft reset the octeon device before exiting */
- if (!OCTEON_CN23XX_PF(oct) ||
- (OCTEON_CN23XX_PF(oct) && !oct->octeon_id))
+ /* Soft reset the octeon device before exiting.
+ * Implementation note: here, we reset the device
+ * if it is a CN6XXX OR the last CN23XX device.
+ */
+ if (OCTEON_CN6XXX(oct) || !refcount)
oct->fn_list.soft_reset(oct);
}
@@ -3020,6 +3024,7 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ case HWTSTAMP_FILTER_NTP_ALL:
conf.rx_filter = HWTSTAMP_FILTER_ALL;
break;
default:
@@ -3586,6 +3591,10 @@ static netdev_features_t liquidio_fix_features(struct net_device *netdev,
(lio->dev_capability & NETIF_F_LRO))
request &= ~NETIF_F_LRO;
+ if ((request & NETIF_F_HW_VLAN_CTAG_FILTER) &&
+ !(lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER))
+ request &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
+
return request;
}
@@ -3598,14 +3607,14 @@ static int liquidio_set_features(struct net_device *netdev,
{
struct lio *lio = netdev_priv(netdev);
- if (!((netdev->features ^ features) & NETIF_F_LRO))
- return 0;
-
- if ((features & NETIF_F_LRO) && (lio->dev_capability & NETIF_F_LRO))
+ if ((features & NETIF_F_LRO) &&
+ (lio->dev_capability & NETIF_F_LRO) &&
+ !(netdev->features & NETIF_F_LRO))
liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
else if (!(features & NETIF_F_LRO) &&
- (lio->dev_capability & NETIF_F_LRO))
+ (lio->dev_capability & NETIF_F_LRO) &&
+ (netdev->features & NETIF_F_LRO))
liquidio_set_feature(netdev, OCTNET_CMD_LRO_DISABLE,
OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
@@ -3624,6 +3633,17 @@ static int liquidio_set_features(struct net_device *netdev,
liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
OCTNET_CMD_RXCSUM_DISABLE);
+ if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
+ (lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER) &&
+ !(netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
+ liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL,
+ OCTNET_CMD_VLAN_FILTER_ENABLE);
+ else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
+ (lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER) &&
+ (netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
+ liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL,
+ OCTNET_CMD_VLAN_FILTER_DISABLE);
+
return 0;
}
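
The edge detection added above (requested bits checked against
netdev->features) generalizes to the usual ndo_set_features pattern: act
only on bits that actually changed. A compact sketch under the assumption
of a hypothetical lio_toggle_feature() helper; the dev_capability checks
collapse away because .ndo_fix_features has already masked unsupported
bits out of the request:

static int lio_set_features_sketch(struct net_device *netdev,
                                   netdev_features_t features)
{
        /* Only the XOR of old and new state needs acting on. */
        netdev_features_t changed = netdev->features ^ features;

        if (changed & NETIF_F_LRO)
                lio_toggle_feature(netdev, NETIF_F_LRO,
                                   !!(features & NETIF_F_LRO));
        if (changed & NETIF_F_HW_VLAN_CTAG_FILTER)
                lio_toggle_feature(netdev, NETIF_F_HW_VLAN_CTAG_FILTER,
                                   !!(features & NETIF_F_HW_VLAN_CTAG_FILTER));
        return 0;
}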
@@ -3694,6 +3714,9 @@ static int liquidio_set_vf_mac(struct net_device *netdev, int vfidx, u8 *mac)
struct octeon_device *oct = lio->oct_dev;
int retval;
+ if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
+ return -EINVAL;
+
retval = __liquidio_set_vf_mac(netdev, vfidx, mac, true);
if (!retval)
cn23xx_tell_vf_its_macaddr_changed(oct, vfidx, mac);
@@ -3876,7 +3899,7 @@ static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf)
union oct_link_status *ls;
int i;
- if (recv_pkt->buffer_size[0] != sizeof(*ls)) {
+ if (recv_pkt->buffer_size[0] != (sizeof(*ls) + OCT_DROQ_INFO_SIZE)) {
dev_err(&oct->pci_dev->dev, "Malformed NIC_INFO, len=%d, ifidx=%d\n",
recv_pkt->buffer_size[0],
recv_pkt->rh.r_nic_info.gmxport);
@@ -3884,7 +3907,8 @@ static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf)
}
gmxport = recv_pkt->rh.r_nic_info.gmxport;
- ls = (union oct_link_status *)get_rbd(recv_pkt->buffer_ptr[0]);
+ ls = (union oct_link_status *)(get_rbd(recv_pkt->buffer_ptr[0]) +
+ OCT_DROQ_INFO_SIZE);
octeon_swap_8B_data((u64 *)ls, (sizeof(union oct_link_status)) >> 3);
for (i = 0; i < oct->ifcount; i++) {
@@ -4191,7 +4215,8 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
- liquidio_set_feature(netdev, OCTNET_CMD_ENABLE_VLAN_FILTER, 0);
+ liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL,
+ OCTNET_CMD_VLAN_FILTER_ENABLE);
if ((debug != -1) && (debug & NETIF_MSG_HW))
liquidio_set_feature(netdev,
@@ -4441,7 +4466,7 @@ octeon_recv_vf_drv_notice(struct octeon_recv_info *recv_info, void *buf)
u64 *data, vf_num;
notice = recv_pkt->rh.r.ossp;
- data = (u64 *)get_rbd(recv_pkt->buffer_ptr[0]);
+ data = (u64 *)(get_rbd(recv_pkt->buffer_ptr[0]) + OCT_DROQ_INFO_SIZE);
/* the first 64-bit word of data is the vf_num */
vf_num = data[0];
@@ -4511,6 +4536,15 @@ static int octeon_device_init(struct octeon_device *octeon_dev)
atomic_set(&octeon_dev->status, OCT_DEV_PCI_MAP_DONE);
+ /* Only add a reference after setting status 'OCT_DEV_PCI_MAP_DONE',
+ * since that is what is required for the reference to be removed
+ * during de-initialization (see 'octeon_destroy_resources').
+ */
+ octeon_register_device(octeon_dev, octeon_dev->pci_dev->bus->number,
+ PCI_SLOT(octeon_dev->pci_dev->devfn),
+ PCI_FUNC(octeon_dev->pci_dev->devfn),
+ true);
+
octeon_dev->app_mode = CVM_DRV_INVALID_APP;
if (OCTEON_CN23XX_PF(octeon_dev)) {
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
index 34c77821fad9..9b247102eb92 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
@@ -879,8 +879,6 @@ liquidio_vf_probe(struct pci_dev *pdev,
*/
static void octeon_pci_flr(struct octeon_device *oct)
{
- u16 status;
-
pci_save_state(oct->pci_dev);
pci_cfg_access_lock(oct->pci_dev);
@@ -889,20 +887,7 @@ static void octeon_pci_flr(struct octeon_device *oct)
pci_write_config_word(oct->pci_dev, PCI_COMMAND,
PCI_COMMAND_INTX_DISABLE);
- /* Wait for Transaction Pending bit clean */
- msleep(100);
- pcie_capability_read_word(oct->pci_dev, PCI_EXP_DEVSTA, &status);
- if (status & PCI_EXP_DEVSTA_TRPND) {
- dev_info(&oct->pci_dev->dev, "Function reset incomplete after 100ms, sleeping for 5 seconds\n");
- ssleep(5);
- pcie_capability_read_word(oct->pci_dev, PCI_EXP_DEVSTA,
- &status);
- if (status & PCI_EXP_DEVSTA_TRPND)
- dev_info(&oct->pci_dev->dev, "Function reset still incomplete after 5s, reset anyway\n");
- }
- pcie_capability_set_word(oct->pci_dev, PCI_EXP_DEVCTL,
- PCI_EXP_DEVCTL_BCR_FLR);
- mdelay(100);
+ pcie_flr(oct->pci_dev);
pci_cfg_access_unlock(oct->pci_dev);
@@ -2100,6 +2085,7 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ case HWTSTAMP_FILTER_NTP_ALL:
conf.rx_filter = HWTSTAMP_FILTER_ALL;
break;
default:
@@ -2732,7 +2718,7 @@ static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf)
int gmxport = 0;
int i;
- if (recv_pkt->buffer_size[0] != sizeof(*ls)) {
+ if (recv_pkt->buffer_size[0] != (sizeof(*ls) + OCT_DROQ_INFO_SIZE)) {
dev_err(&oct->pci_dev->dev, "Malformed NIC_INFO, len=%d, ifidx=%d\n",
recv_pkt->buffer_size[0],
recv_pkt->rh.r_nic_info.gmxport);
@@ -2740,7 +2726,8 @@ static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf)
}
gmxport = recv_pkt->rh.r_nic_info.gmxport;
- ls = (union oct_link_status *)get_rbd(recv_pkt->buffer_ptr[0]);
+ ls = (union oct_link_status *)(get_rbd(recv_pkt->buffer_ptr[0]) +
+ OCT_DROQ_INFO_SIZE);
octeon_swap_8B_data((u64 *)ls, (sizeof(union oct_link_status)) >> 3);
@@ -3011,10 +2998,6 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
- if ((debug != -1) && (debug & NETIF_MSG_HW))
- liquidio_set_feature(netdev, OCTNET_CMD_VERBOSE_ENABLE,
- 0);
-
if (setup_link_status_change_wq(netdev))
goto setup_nic_dev_fail;
@@ -3202,13 +3185,28 @@ static int octeon_device_init(struct octeon_device *oct)
if (octeon_setup_interrupt(oct))
return 1;
+ atomic_set(&oct->status, OCT_DEV_INTR_SET_DONE);
+
+ /* ***************************************************************
+ * The interrupts need to be enabled for the PF<-->VF handshake.
+ * They are [re]-enabled after the PF<-->VF handshake so that the
+ * correct OQ tick value is used (i.e. the value retrieved from
+ * the PF as part of the handshake).
+ */
+
+ /* Enable Octeon device interrupts */
+ oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR);
+
if (cn23xx_octeon_pfvf_handshake(oct))
return 1;
+ /* Here we [re]-enable the interrupts so that the correct OQ tick value
+ * is used (i.e. the value that was retrieved during the handshake)
+ */
+
/* Enable Octeon device interrupts */
oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR);
-
- atomic_set(&oct->status, OCT_DEV_INTR_SET_DONE);
+ /* *************************************************************** */
/* Enable the input and output queues for this Octeon device */
if (oct->fn_list.enable_io_queues(oct)) {
diff --git a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h
index 8ea2323d8d67..231dd7fbfb80 100644
--- a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h
+++ b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h
@@ -173,6 +173,8 @@ static inline void add_sg_size(struct octeon_sg_entry *sg_entry,
/*------------------------- End Scatter/Gather ---------------------------*/
+#define OCTNET_FRM_LENGTH_SIZE 8
+
#define OCTNET_FRM_PTP_HEADER_SIZE 8
#define OCTNET_FRM_HEADER_SIZE 22 /* VLAN + Ethernet */
@@ -215,7 +217,7 @@ static inline void add_sg_size(struct octeon_sg_entry *sg_entry,
#define OCTNET_CMD_VERBOSE_ENABLE 0x14
#define OCTNET_CMD_VERBOSE_DISABLE 0x15
-#define OCTNET_CMD_ENABLE_VLAN_FILTER 0x16
+#define OCTNET_CMD_VLAN_FILTER_CTL 0x16
#define OCTNET_CMD_ADD_VLAN_FILTER 0x17
#define OCTNET_CMD_DEL_VLAN_FILTER 0x18
#define OCTNET_CMD_VXLAN_PORT_CONFIG 0x19
@@ -230,6 +232,8 @@ static inline void add_sg_size(struct octeon_sg_entry *sg_entry,
#define OCTNET_CMD_RXCSUM_DISABLE 0x1
#define OCTNET_CMD_TXCSUM_ENABLE 0x0
#define OCTNET_CMD_TXCSUM_DISABLE 0x1
+#define OCTNET_CMD_VLAN_FILTER_ENABLE 0x1
+#define OCTNET_CMD_VLAN_FILTER_DISABLE 0x0
/* RX(packets coming from wire) Checksum verification flags */
/* TCP/UDP csum */
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_config.h b/drivers/net/ethernet/cavium/liquidio/octeon_config.h
index d29ebc531151..f229d792c2b3 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_config.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_config.h
@@ -47,7 +47,7 @@
/* CN6xxx OQ configuration macros */
#define CN6XXX_MAX_OUTPUT_QUEUES 32
#define CN6XXX_MAX_OQ_DESCRIPTORS 2048
-#define CN6XXX_OQ_BUF_SIZE 1536
+#define CN6XXX_OQ_BUF_SIZE 1664
#define CN6XXX_OQ_PKTSPER_INTR ((CN6XXX_MAX_OQ_DESCRIPTORS < 512) ? \
(CN6XXX_MAX_OQ_DESCRIPTORS / 4) : 128)
#define CN6XXX_OQ_REFIL_THRESHOLD ((CN6XXX_MAX_OQ_DESCRIPTORS < 512) ? \
@@ -78,7 +78,7 @@
#define CN23XX_MAX_OUTPUT_QUEUES CN23XX_MAX_RINGS_PER_PF
#define CN23XX_MAX_OQ_DESCRIPTORS 512
-#define CN23XX_OQ_BUF_SIZE 1536
+#define CN23XX_OQ_BUF_SIZE 1664
#define CN23XX_OQ_PKTSPER_INTR 128
/*#define CAVIUM_ONLY_CN23XX_RX_PERF*/
#define CN23XX_OQ_REFIL_THRESHOLD 16
@@ -98,8 +98,6 @@
#define OCTEON_32BYTE_INSTR 32
#define OCTEON_64BYTE_INSTR 64
#define OCTEON_MAX_BASE_IOQ 4
-#define OCTEON_OQ_BUFPTR_MODE 0
-#define OCTEON_OQ_INFOPTR_MODE 1
#define OCTEON_DMA_INTR_PKT 64
#define OCTEON_DMA_INTR_TIME 1000
@@ -125,7 +123,6 @@
#define CFG_SET_IQ_INTR_PKT(cfg, val) (cfg)->iq.iq_intr_pkt = val
#define CFG_GET_OQ_MAX_Q(cfg) ((cfg)->oq.max_oqs)
-#define CFG_GET_OQ_INFO_PTR(cfg) ((cfg)->oq.info_ptr)
#define CFG_GET_OQ_PKTS_PER_INTR(cfg) ((cfg)->oq.pkts_per_intr)
#define CFG_GET_OQ_REFILL_THRESHOLD(cfg) ((cfg)->oq.refill_threshold)
#define CFG_GET_OQ_INTR_PKT(cfg) ((cfg)->oq.oq_intr_pkt)
@@ -266,9 +263,6 @@ struct octeon_oq_config {
*/
u64 refill_threshold:16;
- /** If set, the Output queue uses info-pointer mode. (Default: 1) */
- u64 info_ptr:32;
-
/* Max number of OQs available */
u64 max_oqs:8;
@@ -276,9 +270,6 @@ struct octeon_oq_config {
/* Max number of OQs available */
u64 max_oqs:8;
- /** If set, the Output queue uses info-pointer mode. (Default: 1) */
- u64 info_ptr:32;
-
/** The number of buffers that were consumed during packet processing by
* the driver on this Output queue before the driver attempts to
* replenish
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_console.c b/drivers/net/ethernet/cavium/liquidio/octeon_console.c
index 53f38d05f7c2..e08f7600f986 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_console.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_console.c
@@ -724,13 +724,11 @@ static int octeon_console_read(struct octeon_device *oct, u32 console_num,
}
#define FBUF_SIZE (4 * 1024 * 1024)
-u8 fbuf[FBUF_SIZE];
int octeon_download_firmware(struct octeon_device *oct, const u8 *data,
size_t size)
{
int ret = 0;
- u8 *p = fbuf;
u32 crc32_result;
u64 load_addr;
u32 image_len;
@@ -805,10 +803,8 @@ int octeon_download_firmware(struct octeon_device *oct, const u8 *data,
else
size = FBUF_SIZE;
- memcpy(p, data, size);
-
/* download the image */
- octeon_pci_write_core_mem(oct, load_addr, p, (u32)size);
+ octeon_pci_write_core_mem(oct, load_addr, data, (u32)size);
data += size;
rem -= (u32)size;
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.c b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
index e21b477d0159..623e28ca736e 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
@@ -51,7 +51,6 @@ static struct octeon_config default_cn66xx_conf = {
/** OQ attributes */
.oq = {
.max_oqs = CN6XXX_CFG_IO_QUEUES,
- .info_ptr = OCTEON_OQ_INFOPTR_MODE,
.refill_threshold = CN6XXX_OQ_REFIL_THRESHOLD,
.oq_intr_pkt = CN6XXX_OQ_INTR_PKT,
.oq_intr_time = CN6XXX_OQ_INTR_TIME,
@@ -161,7 +160,6 @@ static struct octeon_config default_cn68xx_conf = {
/** OQ attributes */
.oq = {
.max_oqs = CN6XXX_CFG_IO_QUEUES,
- .info_ptr = OCTEON_OQ_INFOPTR_MODE,
.refill_threshold = CN6XXX_OQ_REFIL_THRESHOLD,
.oq_intr_pkt = CN6XXX_OQ_INTR_PKT,
.oq_intr_time = CN6XXX_OQ_INTR_TIME,
@@ -328,7 +326,6 @@ static struct octeon_config default_cn68xx_210nv_conf = {
/** OQ attributes */
.oq = {
.max_oqs = CN6XXX_CFG_IO_QUEUES,
- .info_ptr = OCTEON_OQ_INFOPTR_MODE,
.refill_threshold = CN6XXX_OQ_REFIL_THRESHOLD,
.oq_intr_pkt = CN6XXX_OQ_INTR_PKT,
.oq_intr_time = CN6XXX_OQ_INTR_TIME,
@@ -432,7 +429,6 @@ static struct octeon_config default_cn23xx_conf = {
/** OQ attributes */
.oq = {
.max_oqs = CN23XX_CFG_IO_QUEUES,
- .info_ptr = OCTEON_OQ_INFOPTR_MODE,
.pkts_per_intr = CN23XX_OQ_PKTSPER_INTR,
.refill_threshold = CN23XX_OQ_REFIL_THRESHOLD,
.oq_intr_pkt = CN23XX_OQ_INTR_PKT,
@@ -543,7 +539,11 @@ static char oct_dev_app_str[CVM_DRV_APP_COUNT + 1][32] = {
"BASE", "NIC", "UNKNOWN"};
static struct octeon_device *octeon_device[MAX_OCTEON_DEVICES];
+static atomic_t adapter_refcounts[MAX_OCTEON_DEVICES];
+
static u32 octeon_device_count;
+/* locks device array (i.e. octeon_device[]) */
+static spinlock_t octeon_devices_lock;
static struct octeon_core_setup core_setup[MAX_OCTEON_DEVICES];
@@ -561,6 +561,7 @@ void octeon_init_device_list(int conf_type)
memset(octeon_device, 0, (sizeof(void *) * MAX_OCTEON_DEVICES));
for (i = 0; i < MAX_OCTEON_DEVICES; i++)
oct_set_config_info(i, conf_type);
+ spin_lock_init(&octeon_devices_lock);
}
static void *__retrieve_octeon_config_info(struct octeon_device *oct,
@@ -720,23 +721,27 @@ struct octeon_device *octeon_allocate_device(u32 pci_id,
u32 oct_idx = 0;
struct octeon_device *oct = NULL;
+ spin_lock(&octeon_devices_lock);
+
for (oct_idx = 0; oct_idx < MAX_OCTEON_DEVICES; oct_idx++)
if (!octeon_device[oct_idx])
break;
- if (oct_idx == MAX_OCTEON_DEVICES)
- return NULL;
+ if (oct_idx < MAX_OCTEON_DEVICES) {
+ oct = octeon_allocate_device_mem(pci_id, priv_size);
+ if (oct) {
+ octeon_device_count++;
+ octeon_device[oct_idx] = oct;
+ }
+ }
- oct = octeon_allocate_device_mem(pci_id, priv_size);
+ spin_unlock(&octeon_devices_lock);
if (!oct)
return NULL;
spin_lock_init(&oct->pci_win_lock);
spin_lock_init(&oct->mem_access_lock);
- octeon_device_count++;
- octeon_device[oct_idx] = oct;
-
oct->octeon_id = oct_idx;
snprintf(oct->device_name, sizeof(oct->device_name),
"LiquidIO%d", (oct->octeon_id));
@@ -744,6 +749,72 @@ struct octeon_device *octeon_allocate_device(u32 pci_id,
return oct;
}
+/** Register a device's bus location at initialization time.
+ * @param octeon_dev - pointer to the octeon device structure.
+ * @param bus - PCIe bus #
+ * @param dev - PCIe device #
+ * @param func - PCIe function #
+ * @param is_pf - TRUE for PF, FALSE for VF
+ * @return reference count of device's adapter
+ */
+int octeon_register_device(struct octeon_device *oct,
+ int bus, int dev, int func, int is_pf)
+{
+ int idx, refcount;
+
+ oct->loc.bus = bus;
+ oct->loc.dev = dev;
+ oct->loc.func = func;
+
+ oct->adapter_refcount = &adapter_refcounts[oct->octeon_id];
+ atomic_set(oct->adapter_refcount, 0);
+
+ spin_lock(&octeon_devices_lock);
+ for (idx = (int)oct->octeon_id - 1; idx >= 0; idx--) {
+ if (!octeon_device[idx]) {
+ dev_err(&oct->pci_dev->dev,
+ "%s: Internal driver error, missing dev",
+ __func__);
+ spin_unlock(&octeon_devices_lock);
+ atomic_inc(oct->adapter_refcount);
+ return 1; /* here, refcount is guaranteed to be 1 */
+ }
+ /* if another device is at same bus/dev, use its refcounter */
+ if ((octeon_device[idx]->loc.bus == bus) &&
+ (octeon_device[idx]->loc.dev == dev)) {
+ oct->adapter_refcount =
+ octeon_device[idx]->adapter_refcount;
+ break;
+ }
+ }
+ spin_unlock(&octeon_devices_lock);
+
+ atomic_inc(oct->adapter_refcount);
+ refcount = atomic_read(oct->adapter_refcount);
+
+ dev_dbg(&oct->pci_dev->dev, "%s: %02x:%02x:%d refcount %u", __func__,
+ oct->loc.bus, oct->loc.dev, oct->loc.func, refcount);
+
+ return refcount;
+}
+
+/** Deregister a device at de-initialization time.
+ * @param octeon_dev - pointer to the octeon device structure.
+ * @return reference count of device's adapter
+ */
+int octeon_deregister_device(struct octeon_device *oct)
+{
+ int refcount;
+
+ atomic_dec(oct->adapter_refcount);
+ refcount = atomic_read(oct->adapter_refcount);
+
+ dev_dbg(&oct->pci_dev->dev, "%s: %04d:%02d:%d refcount %u", __func__,
+ oct->loc.bus, oct->loc.dev, oct->loc.func, refcount);
+
+ return refcount;
+}
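
Pieced together from the hunks above, the intended lifecycle is: register
after the PCI BARs are mapped, deregister during teardown, and soft-reset
only when the last function of a CN23XX adapter exits (CN6XXX parts are
single-function). An illustrative fragment, not driver code:

static void adapter_teardown_sketch(struct octeon_device *oct)
{
        /* Returns how many sibling functions still hold the adapter. */
        int refcount = octeon_deregister_device(oct);

        if (OCTEON_CN6XXX(oct) || !refcount)
                oct->fn_list.soft_reset(oct);
}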
+
int
octeon_allocate_ioq_vector(struct octeon_device *oct)
{
@@ -1161,13 +1232,15 @@ int octeon_core_drv_init(struct octeon_recv_info *recv_info, void *buf)
cs = &core_setup[oct->octeon_id];
- if (recv_pkt->buffer_size[0] != sizeof(*cs)) {
+ if (recv_pkt->buffer_size[0] != (sizeof(*cs) + OCT_DROQ_INFO_SIZE)) {
dev_dbg(&oct->pci_dev->dev, "Core setup bytes expected %u found %d\n",
(u32)sizeof(*cs),
recv_pkt->buffer_size[0]);
}
- memcpy(cs, get_rbd(recv_pkt->buffer_ptr[0]), sizeof(*cs));
+ memcpy(cs, get_rbd(recv_pkt->buffer_ptr[0]) + OCT_DROQ_INFO_SIZE,
+ sizeof(*cs));
+
strncpy(oct->boardinfo.name, cs->boardname, OCT_BOARD_NAME);
strncpy(oct->boardinfo.serial_number, cs->board_serial_number,
OCT_SERIAL_LEN);
@@ -1354,13 +1427,15 @@ int lio_get_device_id(void *dev)
void lio_enable_irq(struct octeon_droq *droq, struct octeon_instr_queue *iq)
{
u64 instr_cnt;
+ u32 pkts_pend;
struct octeon_device *oct = NULL;
/* the whole thing needs to be atomic, ideally */
if (droq) {
+ pkts_pend = (u32)atomic_read(&droq->pkts_pending);
spin_lock_bh(&droq->lock);
- writel(droq->pkt_count, droq->pkts_sent_reg);
- droq->pkt_count = 0;
+ writel(droq->pkt_count - pkts_pend, droq->pkts_sent_reg);
+ droq->pkt_count = pkts_pend;
/* this write needs to be flushed before we release the lock */
mmiowb();
spin_unlock_bh(&droq->lock);
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.h b/drivers/net/ethernet/cavium/liquidio/octeon_device.h
index 92f67de111aa..c90ed48ae8ab 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_device.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.h
@@ -544,6 +544,14 @@ struct octeon_device {
u32 tx_max_coalesced_frames;
bool cores_crashed;
+
+ struct {
+ int bus;
+ int dev;
+ int func;
+ } loc;
+
+ atomic_t *adapter_refcount; /* reference count of adapter */
};
#define OCT_DRV_ONLINE 1
@@ -572,6 +580,23 @@ void octeon_free_device_mem(struct octeon_device *oct);
struct octeon_device *octeon_allocate_device(u32 pci_id,
u32 priv_size);
+/** Register a device's bus location at initialization time.
+ * @param octeon_dev - pointer to the octeon device structure.
+ * @param bus - PCIe bus #
+ * @param dev - PCIe device #
+ * @param func - PCIe function #
+ * @param is_pf - TRUE for PF, FALSE for VF
+ * @return reference count of device's adapter
+ */
+int octeon_register_device(struct octeon_device *oct,
+ int bus, int dev, int func, int is_pf);
+
+/** Deregister a device at de-initialization time.
+ * @param octeon_dev - pointer to the octeon device structure.
+ * @return reference count of device's adapter
+ */
+int octeon_deregister_device(struct octeon_device *oct);
+
/** Initialize the driver's dispatch list which is a mix of a hash table
* and a linked list. This is done at driver load time.
* @param octeon_dev - pointer to the octeon device structure.
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
index 286be5539cef..2e190deb2233 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
@@ -181,10 +181,7 @@ octeon_droq_setup_ring_buffers(struct octeon_device *oct,
droq->recv_buf_list[i].buffer = buf;
droq->recv_buf_list[i].data = get_rbd(buf);
- droq->info_list[i].length = 0;
-
- /* map ring buffers into memory */
- desc_ring[i].info_ptr = lio_map_ring_info(droq, i);
+ desc_ring[i].info_ptr = 0;
desc_ring[i].buffer_ptr =
lio_map_ring(droq->recv_buf_list[i].buffer);
}
@@ -205,9 +202,6 @@ int octeon_delete_droq(struct octeon_device *oct, u32 q_no)
octeon_droq_destroy_ring_buffers(oct, droq);
vfree(droq->recv_buf_list);
- if (droq->info_base_addr)
- lio_free_info_buffer(oct, droq);
-
if (droq->desc_ring)
lio_dma_free(oct, (droq->max_count * OCT_DROQ_DESC_SIZE),
droq->desc_ring, droq->desc_ring_dma);
@@ -280,14 +274,6 @@ int octeon_init_droq(struct octeon_device *oct,
dev_dbg(&oct->pci_dev->dev, "droq[%d]: num_desc: %d\n", q_no,
droq->max_count);
- droq->info_list = lio_alloc_info_buffer(oct, droq);
- if (!droq->info_list) {
- dev_err(&oct->pci_dev->dev, "Cannot allocate memory for info list.\n");
- lio_dma_free(oct, (droq->max_count * OCT_DROQ_DESC_SIZE),
- droq->desc_ring, droq->desc_ring_dma);
- return 1;
- }
-
droq->recv_buf_list = (struct octeon_recv_buffer *)
vmalloc_node(droq->max_count *
OCT_DROQ_RECVBUF_SIZE,
@@ -357,7 +343,7 @@ static inline struct octeon_recv_info *octeon_create_recv_info(
u32 i, bytes_left;
struct octeon_skb_page_info *pg_info;
- info = &droq->info_list[idx];
+ info = (struct octeon_droq_info *)droq->recv_buf_list[idx].data;
recv_info = octeon_alloc_recv_info(sizeof(struct __dispatch));
if (!recv_info)
@@ -425,8 +411,7 @@ octeon_droq_refill_pullup_descs(struct octeon_droq *droq,
droq->max_count);
desc_refilled++;
droq->refill_count--;
- } while (droq->recv_buf_list[droq->refill_idx].
- buffer);
+ } while (droq->recv_buf_list[droq->refill_idx].buffer);
}
refill_index = incr_index(refill_index, 1, droq->max_count);
} /* while */
@@ -490,10 +475,8 @@ octeon_droq_refill(struct octeon_device *octeon_dev, struct octeon_droq *droq)
droq->recv_buf_list[droq->refill_idx].data = data;
desc_ring[droq->refill_idx].buffer_ptr =
- lio_map_ring(droq->recv_buf_list[droq->
- refill_idx].buffer);
- /* Reset any previous values in the length field. */
- droq->info_list[droq->refill_idx].length = 0;
+ lio_map_ring(droq->recv_buf_list[
+ droq->refill_idx].buffer);
droq->refill_idx = incr_index(droq->refill_idx, 1,
droq->max_count);
@@ -542,11 +525,7 @@ void octeon_droq_check_oom(struct octeon_droq *droq)
static inline u32
octeon_droq_get_bufcount(u32 buf_size, u32 total_len)
{
- u32 buf_cnt = 0;
-
- while (total_len > (buf_size * buf_cnt))
- buf_cnt++;
- return buf_cnt;
+ return ((total_len + buf_size - 1) / buf_size);
}
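
The closed form replacing the counting loop is plain ceiling division (the
kernel's DIV_ROUND_UP() macro expresses the same thing). A standalone check:

#include <assert.h>

static unsigned int bufcount(unsigned int buf_size, unsigned int total_len)
{
        /* Smallest n such that n * buf_size >= total_len. */
        return (total_len + buf_size - 1) / buf_size;
}

int main(void)
{
        assert(bufcount(1664, 0) == 0);
        assert(bufcount(1664, 1664) == 1);      /* exact fit: one buffer */
        assert(bufcount(1664, 1665) == 2);      /* one byte over: two */
        return 0;
}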
static int
@@ -594,11 +573,12 @@ static inline void octeon_droq_drop_packets(struct octeon_device *oct,
struct octeon_droq_info *info;
for (i = 0; i < cnt; i++) {
- info = &droq->info_list[droq->read_idx];
+ info = (struct octeon_droq_info *)
+ droq->recv_buf_list[droq->read_idx].data;
octeon_swap_8B_data((u64 *)info, 2);
if (info->length) {
- info->length -= OCT_RH_SIZE;
+ info->length += OCTNET_FRM_LENGTH_SIZE;
droq->stats.bytes_received += info->length;
buf_cnt = octeon_droq_get_bufcount(droq->buffer_size,
(u32)info->length);
@@ -630,7 +610,8 @@ octeon_droq_fast_process_packets(struct octeon_device *oct,
struct octeon_skb_page_info *pg_info;
void *buf;
- info = &droq->info_list[droq->read_idx];
+ info = (struct octeon_droq_info *)
+ droq->recv_buf_list[droq->read_idx].data;
octeon_swap_8B_data((u64 *)info, 2);
if (!info->length) {
@@ -644,9 +625,10 @@ octeon_droq_fast_process_packets(struct octeon_device *oct,
}
/* Len of resp hdr in included in the received data len. */
- info->length -= OCT_RH_SIZE;
rh = &info->rh;
+ info->length += OCTNET_FRM_LENGTH_SIZE;
+ rh->r_dh.len += (ROUNDUP8(OCT_DROQ_INFO_SIZE) / sizeof(u64));
total_len += (u32)info->length;
if (opcode_slow_path(rh)) {
u32 buf_cnt;
@@ -690,8 +672,8 @@ octeon_droq_fast_process_packets(struct octeon_device *oct,
nicbuf,
cpy_len,
idx);
- buf = droq->recv_buf_list[idx].
- buffer;
+ buf = droq->recv_buf_list[
+ idx].buffer;
recv_buffer_fast_free(buf);
droq->recv_buf_list[idx].buffer
= NULL;
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.h b/drivers/net/ethernet/cavium/liquidio/octeon_droq.h
index 9781577115e7..6efd139b894d 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_droq.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.h
@@ -51,11 +51,11 @@ struct octeon_droq_desc {
* about the packet.
*/
struct octeon_droq_info {
- /** The Output Receive Header. */
- union octeon_rh rh;
-
/** The Length of the packet. */
u64 length;
+
+ /** The Output Receive Header. */
+ union octeon_rh rh;
};
#define OCT_DROQ_INFO_SIZE (sizeof(struct octeon_droq_info))
@@ -294,9 +294,6 @@ struct octeon_droq {
*/
u32 max_empty_descs;
- /** The 8B aligned info ptrs begin from this address. */
- struct octeon_droq_info *info_list;
-
/** The receive buffer list. This list has the virtual addresses of the
* buffers.
*/
@@ -324,15 +321,6 @@ struct octeon_droq {
/** DMA mapped address of the DROQ descriptor ring. */
size_t desc_ring_dma;
- /** Info ptr list are allocated at this virtual address. */
- void *info_base_addr;
-
- /** DMA mapped address of the info list */
- dma_addr_t info_list_dma;
-
- /** Allocated size of info list. */
- u32 info_alloc_size;
-
/** application context */
void *app_ctx;
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
index 5063a12613e5..5c3c8da976f7 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
@@ -251,7 +251,7 @@ union octeon_instr_64B {
/** The size of each buffer in soft command buffer pool
*/
-#define SOFT_COMMAND_BUFFER_SIZE 1536
+#define SOFT_COMMAND_BUFFER_SIZE 2048
struct octeon_soft_command {
/** Soft command buffer info. */
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c b/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c
index 5cca73b8880b..57af7df74ced 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c
@@ -178,7 +178,10 @@ int octeon_mbox_write(struct octeon_device *oct,
break;
}
}
- writeq(mbox_cmd->data[i], mbox->mbox_write_reg);
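+			/* Only push the next mailbox word if the ACK wait above succeeded */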
+ if (ret == OCTEON_MBOX_STATUS_SUCCESS)
+ writeq(mbox_cmd->data[i], mbox->mbox_write_reg);
+ else
+ break;
}
}
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.h b/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.h
index c9376fe075bc..1def22afeff1 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.h
@@ -20,16 +20,16 @@
/* Macros for Mail Box Communication */
-#define OCTEON_MBOX_DATA_MAX 32
+#define OCTEON_MBOX_DATA_MAX 32
#define OCTEON_VF_ACTIVE 0x1
#define OCTEON_VF_FLR_REQUEST 0x2
#define OCTEON_PF_CHANGED_VF_MACADDR 0x4
/* Macro for read acknowledgement */
-#define OCTEON_PFVFACK 0xffffffffffffffff
-#define OCTEON_PFVFSIG 0x1122334455667788
-#define OCTEON_PFVFERR 0xDEADDEADDEADDEAD
+#define OCTEON_PFVFACK 0xffffffffffffffffULL
+#define OCTEON_PFVFSIG 0x1122334455667788ULL
+#define OCTEON_PFVFERR 0xDEADDEADDEADDEADULL
#define LIO_MBOX_WRITE_WAIT_CNT 1000
#define LIO_MBOX_WRITE_WAIT_TIME msecs_to_jiffies(1)
@@ -74,8 +74,8 @@ enum octeon_mbox_state {
OCTEON_MBOX_STATE_REQUEST_RECEIVED = 4,
OCTEON_MBOX_STATE_RESPONSE_PENDING = 8,
OCTEON_MBOX_STATE_RESPONSE_RECEIVING = 16,
- OCTEON_MBOX_STATE_RESPONSE_RECEIVED = 16,
- OCTEON_MBOX_STATE_ERROR = 32
+ OCTEON_MBOX_STATE_RESPONSE_RECEIVED = 32,
+ OCTEON_MBOX_STATE_ERROR = 64
};
struct octeon_mbox {
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c b/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c
index 5cd96e7d426c..4c85ae643b7b 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c
@@ -167,10 +167,10 @@ octeon_pci_read_core_mem(struct octeon_device *oct,
void
octeon_pci_write_core_mem(struct octeon_device *oct,
u64 coreaddr,
- u8 *buf,
+ const u8 *buf,
u32 len)
{
- __octeon_pci_rw_core_mem(oct, coreaddr, buf, len, 0);
+ __octeon_pci_rw_core_mem(oct, coreaddr, (u8 *)buf, len, 0);
}
u64 octeon_read_device_mem64(struct octeon_device *oct, u64 coreaddr)
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.h b/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.h
index bae2fdd89503..47a3ff5f9b1e 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.h
@@ -66,7 +66,7 @@ octeon_pci_read_core_mem(struct octeon_device *oct,
void
octeon_pci_write_core_mem(struct octeon_device *oct,
u64 coreaddr,
- u8 *buf,
+ const u8 *buf,
u32 len);
#endif
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_network.h b/drivers/net/ethernet/cavium/liquidio/octeon_network.h
index bf483932ff25..ec8504b2942d 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_network.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_network.h
@@ -356,29 +356,6 @@ static inline void tx_buffer_free(void *buffer)
#define lio_dma_free(oct, size, virt_addr, dma_addr) \
dma_free_coherent(&(oct)->pci_dev->dev, size, virt_addr, dma_addr)
-static inline void *
-lio_alloc_info_buffer(struct octeon_device *oct,
- struct octeon_droq *droq)
-{
- void *virt_ptr;
-
- virt_ptr = lio_dma_alloc(oct, (droq->max_count * OCT_DROQ_INFO_SIZE),
- &droq->info_list_dma);
- if (virt_ptr) {
- droq->info_alloc_size = droq->max_count * OCT_DROQ_INFO_SIZE;
- droq->info_base_addr = virt_ptr;
- }
-
- return virt_ptr;
-}
-
-static inline void lio_free_info_buffer(struct octeon_device *oct,
- struct octeon_droq *droq)
-{
- lio_dma_free(oct, droq->info_alloc_size, droq->info_base_addr,
- droq->info_list_dma);
-}
-
static inline
void *get_rbd(struct sk_buff *skb)
{
@@ -392,12 +369,6 @@ void *get_rbd(struct sk_buff *skb)
}
static inline u64
-lio_map_ring_info(struct octeon_droq *droq, u32 i)
-{
- return droq->info_list_dma + (i * sizeof(struct octeon_droq_info));
-}
-
-static inline u64
lio_map_ring(void *buf)
{
dma_addr_t dma_addr;
@@ -443,8 +414,8 @@ static inline void octeon_fast_packet_next(struct octeon_droq *droq,
int copy_len,
int idx)
{
- memcpy(skb_put(nicbuf, copy_len),
- get_rbd(droq->recv_buf_list[idx].buffer), copy_len);
+ skb_put_data(nicbuf, get_rbd(droq->recv_buf_list[idx].buffer),
+ copy_len);
}
/**
diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c
index 261f448f9de2..7b297f1f6dbe 100644
--- a/drivers/net/ethernet/cavium/liquidio/request_manager.c
+++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c
@@ -252,8 +252,7 @@ int lio_wait_for_instr_fetch(struct octeon_device *oct)
if (!(oct->io_qmask.iq & BIT_ULL(i)))
continue;
pending =
- atomic_read(&oct->
- instr_queue[i]->instr_pending);
+ atomic_read(&oct->instr_queue[i]->instr_pending);
if (pending)
__check_db_timeout(oct, i);
instr_cnt += pending;
diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
index a2138686c605..2887bcaf6af5 100644
--- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
@@ -755,6 +755,7 @@ static int octeon_mgmt_ioctl_hwtstamp(struct net_device *netdev,
case HWTSTAMP_FILTER_PTP_V2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ case HWTSTAMP_FILTER_NTP_ALL:
p->has_rx_tstamp = have_hw_timestamps;
config.rx_filter = HWTSTAMP_FILTER_ALL;
if (p->has_rx_tstamp) {
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index d6477af88085..49b80da51ba7 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -227,15 +227,14 @@ static void nicvf_handle_mbx_intr(struct nicvf *nic)
nic->speed = mbx.link_status.speed;
nic->mac_type = mbx.link_status.mac_type;
if (nic->link_up) {
- netdev_info(nic->netdev, "%s: Link is Up %d Mbps %s\n",
- nic->netdev->name, nic->speed,
+ netdev_info(nic->netdev, "Link is Up %d Mbps %s duplex\n",
+ nic->speed,
nic->duplex == DUPLEX_FULL ?
- "Full duplex" : "Half duplex");
+ "Full" : "Half");
netif_carrier_on(nic->netdev);
netif_tx_start_all_queues(nic->netdev);
} else {
- netdev_info(nic->netdev, "%s: Link is Down\n",
- nic->netdev->name);
+ netdev_info(nic->netdev, "Link is Down\n");
netif_carrier_off(nic->netdev);
netif_tx_stop_all_queues(nic->netdev);
}
@@ -721,8 +720,7 @@ static void nicvf_rcv_pkt_handler(struct net_device *netdev,
return;
if (netif_msg_pktdata(nic)) {
- netdev_info(nic->netdev, "%s: skb 0x%p, len=%d\n", netdev->name,
- skb, skb->len);
+ netdev_info(nic->netdev, "skb 0x%p, len=%d\n", skb, skb->len);
print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 1,
skb->data, skb->len, true);
}
@@ -854,10 +852,8 @@ done:
netif_tx_wake_queue(txq);
nic = nic->pnicvf;
this_cpu_inc(nic->drv_stats->txq_wake);
- if (netif_msg_tx_err(nic))
- netdev_warn(netdev,
- "%s: Transmit queue wakeup SQ%d\n",
- netdev->name, txq_idx);
+ netif_warn(nic, tx_err, netdev,
+ "Transmit queue wakeup SQ%d\n", txq_idx);
}
}
@@ -928,9 +924,8 @@ static void nicvf_handle_qs_err(unsigned long data)
static void nicvf_dump_intr_status(struct nicvf *nic)
{
- if (netif_msg_intr(nic))
- netdev_info(nic->netdev, "%s: interrupt status 0x%llx\n",
- nic->netdev->name, nicvf_reg_read(nic, NIC_VF_INT));
+ netif_info(nic, intr, nic->netdev, "interrupt status 0x%llx\n",
+ nicvf_reg_read(nic, NIC_VF_INT));
}
static irqreturn_t nicvf_misc_intr_handler(int irq, void *nicvf_irq)
@@ -1212,10 +1207,8 @@ static netdev_tx_t nicvf_xmit(struct sk_buff *skb, struct net_device *netdev)
netif_tx_wake_queue(txq);
} else {
this_cpu_inc(nic->drv_stats->txq_stop);
- if (netif_msg_tx_err(nic))
- netdev_warn(netdev,
- "%s: Transmit ring full, stopping SQ%d\n",
- netdev->name, qid);
+ netif_warn(nic, tx_err, netdev,
+ "Transmit ring full, stopping SQ%d\n", qid);
}
return NETDEV_TX_BUSY;
}
@@ -1600,9 +1593,7 @@ static void nicvf_tx_timeout(struct net_device *dev)
{
struct nicvf *nic = netdev_priv(dev);
- if (netif_msg_tx_err(nic))
- netdev_warn(dev, "%s: Transmit timed out, resetting\n",
- dev->name);
+ netif_warn(nic, tx_err, dev, "Transmit timed out, resetting\n");
this_cpu_inc(nic->drv_stats->tx_timeout);
schedule_work(&nic->reset_task);
@@ -1763,6 +1754,7 @@ static int nicvf_xdp(struct net_device *netdev, struct netdev_xdp *xdp)
return nicvf_xdp_setup(nic, xdp->prog);
case XDP_QUERY_PROG:
xdp->prog_attached = !!nic->xdp_prog;
+ xdp->prog_id = nic->xdp_prog ? nic->xdp_prog->aux->id : 0;
return 0;
default:
return -EINVAL;
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index 2b181762ad49..d4496e9afcdf 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -1811,11 +1811,9 @@ void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx)
/* Check for errors in the receive completion queue entry */
int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
{
- if (netif_msg_rx_err(nic))
- netdev_err(nic->netdev,
- "%s: RX error CQE err_level 0x%x err_opcode 0x%x\n",
- nic->netdev->name,
- cqe_rx->err_level, cqe_rx->err_opcode);
+ netif_err(nic, rx_err, nic->netdev,
+ "RX error CQE err_level 0x%x err_opcode 0x%x\n",
+ cqe_rx->err_level, cqe_rx->err_opcode);
switch (cqe_rx->err_opcode) {
case CQ_RX_ERROP_RE_PARTIAL:
diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.c b/drivers/net/ethernet/chelsio/cxgb/sge.c
index d56142b98534..0f13a7f7c1d3 100644
--- a/drivers/net/ethernet/chelsio/cxgb/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb/sge.c
@@ -1801,7 +1801,7 @@ netdev_tx_t t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
eth_type = skb_network_offset(skb) == ETH_HLEN ?
CPL_ETH_II : CPL_ETH_II_VLAN;
- hdr = (struct cpl_tx_pkt_lso *)skb_push(skb, sizeof(*hdr));
+ hdr = skb_push(skb, sizeof(*hdr));
hdr->opcode = CPL_TX_PKT_LSO;
hdr->ip_csum_dis = hdr->l4_csum_dis = 0;
hdr->ip_hdr_words = ip_hdr(skb)->ihl;
@@ -1849,7 +1849,7 @@ netdev_tx_t t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
}
- cpl = (struct cpl_tx_pkt *)__skb_push(skb, sizeof(*cpl));
+ cpl = __skb_push(skb, sizeof(*cpl));
cpl->opcode = CPL_TX_PKT;
cpl->ip_csum_dis = 1; /* SW calculates IP csum */
cpl->l4_csum_dis = skb->ip_summed == CHECKSUM_PARTIAL ? 0 : 1;
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index 2ff6bd139c96..0bc6a4ffce30 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -471,8 +471,7 @@ static int init_tp_parity(struct adapter *adap)
if (!skb)
goto alloc_skb_fail;
- req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
- memset(req, 0, sizeof(*req));
+ req = __skb_put_zero(skb, sizeof(*req));
req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
req->mtu_idx = NMTUS - 1;
@@ -495,8 +494,7 @@ static int init_tp_parity(struct adapter *adap)
if (!skb)
goto alloc_skb_fail;
- req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
- memset(req, 0, sizeof(*req));
+ req = __skb_put_zero(skb, sizeof(*req));
req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
req->params = htonl(V_L2T_W_IDX(i));
@@ -518,8 +516,7 @@ static int init_tp_parity(struct adapter *adap)
if (!skb)
goto alloc_skb_fail;
- req = (struct cpl_rte_write_req *)__skb_put(skb, sizeof(*req));
- memset(req, 0, sizeof(*req));
+ req = __skb_put_zero(skb, sizeof(*req));
req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
req->l2t_idx = htonl(V_L2T_W_IDX(i));
@@ -538,8 +535,7 @@ static int init_tp_parity(struct adapter *adap)
if (!skb)
goto alloc_skb_fail;
- greq = (struct cpl_set_tcb_field *)__skb_put(skb, sizeof(*greq));
- memset(greq, 0, sizeof(*greq));
+ greq = __skb_put_zero(skb, sizeof(*greq));
greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
greq->mask = cpu_to_be64(1);
@@ -909,7 +905,7 @@ static int write_smt_entry(struct adapter *adapter, int idx)
if (!skb)
return -ENOMEM;
- req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
+ req = __skb_put(skb, sizeof(*req));
req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
req->mtu_idx = NMTUS - 1; /* should be 0 but there's a T3 bug */
@@ -952,7 +948,7 @@ static int send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
if (!skb)
return -ENOMEM;
- req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
+ req = skb_put(skb, sizeof(*req));
req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
req->sched = sched;
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
index fa81445e334c..50cd660732c5 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
@@ -552,7 +552,7 @@ static inline void mk_tid_release(struct sk_buff *skb, unsigned int tid)
struct cpl_tid_release *req;
skb->priority = CPL_PRIORITY_SETUP;
- req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
+ req = __skb_put(skb, sizeof(*req));
req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
}
@@ -1096,7 +1096,7 @@ static void set_l2t_ix(struct t3cdev *tdev, u32 tid, struct l2t_entry *e)
return;
}
skb->priority = CPL_PRIORITY_CONTROL;
- req = (struct cpl_set_tcb_field *)skb_put(skb, sizeof(*req));
+ req = skb_put(skb, sizeof(*req));
req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
req->reply = 0;
diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.c b/drivers/net/ethernet/chelsio/cxgb3/l2t.c
index 26264125865f..248e40c6966c 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.c
@@ -96,7 +96,7 @@ static int setup_l2e_send_pending(struct t3cdev *dev, struct sk_buff *skb,
return -ENOMEM;
}
- req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
+ req = __skb_put(skb, sizeof(*req));
req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, e->idx));
req->params = htonl(V_L2T_W_IDX(e->idx) | V_L2T_W_IFF(e->smt_idx) |
diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c
index 1b9d154f1149..e2d342647b19 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c
@@ -2282,7 +2282,7 @@ static int process_responses(struct adapter *adap, struct sge_qset *qs,
if (!skb)
goto no_mem;
- memcpy(__skb_put(skb, AN_PKT_SIZE), r, AN_PKT_SIZE);
+ __skb_put_data(skb, r, AN_PKT_SIZE);
skb->data[0] = CPL_ASYNC_NOTIF;
rss_hi = htonl(CPL_ASYNC_NOTIF << 24);
q->async_notif++;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/Makefile b/drivers/net/ethernet/chelsio/cxgb4/Makefile
index c6b71f656992..817212702f0a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/Makefile
+++ b/drivers/net/ethernet/chelsio/cxgb4/Makefile
@@ -4,7 +4,7 @@
obj-$(CONFIG_CHELSIO_T4) += cxgb4.o
-cxgb4-objs := cxgb4_main.o l2t.o t4_hw.o sge.o clip_tbl.o cxgb4_ethtool.o cxgb4_uld.o sched.o cxgb4_filter.o cxgb4_tc_u32.o
+cxgb4-objs := cxgb4_main.o l2t.o t4_hw.o sge.o clip_tbl.o cxgb4_ethtool.o cxgb4_uld.o sched.o cxgb4_filter.o cxgb4_tc_u32.o cxgb4_ptp.o
cxgb4-$(CONFIG_CHELSIO_T4_DCB) += cxgb4_dcb.o
cxgb4-$(CONFIG_CHELSIO_T4_FCOE) += cxgb4_fcoe.o
cxgb4-$(CONFIG_DEBUG_FS) += cxgb4_debugfs.o
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index b32eb8c7c1e3..ef4be781fd05 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -48,6 +48,8 @@
#include <linux/vmalloc.h>
#include <linux/etherdevice.h>
#include <linux/net_tstamp.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/ptp_classify.h>
#include <asm/io.h>
#include "t4_chip_type.h"
#include "cxgb4_uld.h"
@@ -362,6 +364,11 @@ struct adapter_params {
unsigned int max_ordird_qp; /* Max read depth per RDMA QP */
unsigned int max_ird_adapter; /* Max read depth per adapter */
bool fr_nsmr_tpte_wr_support; /* FW support for FR_NSMR_TPTE_WR */
+
+	/* MPS Buffer Group Map [per Port]. Bit i is set if buffer group i
+	 * is used by the Port.
+	 */
+	u8 mps_bg_map[MAX_NPORTS];	/* MPS Buffer Group Map */
};
/* State needed to monitor the forward progress of SGE Ingress DMA activities
@@ -505,6 +512,7 @@ struct port_info {
#endif /* CONFIG_CHELSIO_T4_FCOE */
bool rxtstamp; /* Enable TS */
struct hwtstamp_config tstamp_config;
+ bool ptp_enable;
struct sched_table *sched_tbl;
};
@@ -700,6 +708,7 @@ struct sge_uld_txq_info {
struct sge {
struct sge_eth_txq ethtxq[MAX_ETH_QSETS];
+ struct sge_eth_txq ptptxq;
struct sge_ctrl_txq ctrlq[MAX_CTRL_QUEUES];
struct sge_eth_rxq ethrxq[MAX_ETH_QSETS];
@@ -777,6 +786,7 @@ struct uld_msix_info {
struct vf_info {
unsigned char vf_mac_addr[ETH_ALEN];
+ unsigned int tx_rate;
bool pf_set_mac;
};
@@ -863,6 +873,11 @@ struct adapter {
* used for all 4 filters.
*/
+ struct ptp_clock *ptp_clock;
+ struct ptp_clock_info ptp_clock_info;
+ struct sk_buff *ptp_tx_skb;
+ /* ptp lock */
+ spinlock_t ptp_lock;
spinlock_t stats_lock;
spinlock_t win0_lock ____cacheline_aligned_in_smp;
@@ -1434,7 +1449,8 @@ void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
u32 t4_read_rss_pf_map(struct adapter *adapter);
u32 t4_read_rss_pf_mask(struct adapter *adapter);
-unsigned int t4_get_mps_bg_map(struct adapter *adapter, int idx);
+unsigned int t4_get_mps_bg_map(struct adapter *adapter, int pidx);
+unsigned int t4_get_tp_ch_map(struct adapter *adapter, int pidx);
void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[]);
void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[]);
int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data,
@@ -1494,9 +1510,12 @@ int t4_fw_initialize(struct adapter *adap, unsigned int mbox);
int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
unsigned int vf, unsigned int nparams, const u32 *params,
u32 *val);
+int t4_query_params_ns(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int nparams, const u32 *params,
+ u32 *val);
int t4_query_params_rw(struct adapter *adap, unsigned int mbox, unsigned int pf,
unsigned int vf, unsigned int nparams, const u32 *params,
- u32 *val, int rw);
+ u32 *val, int rw, bool sleep_ok);
int t4_set_params_timeout(struct adapter *adap, unsigned int mbox,
unsigned int pf, unsigned int vf,
unsigned int nparams, const u32 *params,
@@ -1552,6 +1571,7 @@ int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
unsigned int vf, unsigned int eqid);
int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox);
void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl);
+int t4_update_port_info(struct port_info *pi);
int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl);
void t4_db_full(struct adapter *adapter);
void t4_db_dropped(struct adapter *adapter);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index 77a59d7db7f5..76540b0e082d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -2669,6 +2669,8 @@ static int tid_info_show(struct seq_file *seq, void *v)
if (t4_read_reg(adap, LE_DB_CONFIG_A) & HASHEN_F) {
unsigned int sb;
+ seq_printf(seq, "Connections in use: %u\n",
+ atomic_read(&t->conns_in_use));
if (chip <= CHELSIO_T5)
sb = t4_read_reg(adap, LE_DB_SERVER_INDEX_A) / 4;
@@ -2699,17 +2701,23 @@ static int tid_info_show(struct seq_file *seq, void *v)
atomic_read(&t->hash_tids_in_use));
}
} else if (t->ntids) {
+ seq_printf(seq, "Connections in use: %u\n",
+ atomic_read(&t->conns_in_use));
+
seq_printf(seq, "TID range: 0..%u", t->ntids - 1);
seq_printf(seq, ", in use: %u\n",
atomic_read(&t->tids_in_use));
}
if (t->nstids)
- seq_printf(seq, "STID range: %u..%u, in use: %u\n",
+ seq_printf(seq, "STID range: %u..%u, in use-IPv4/IPv6: %u/%u\n",
(!t->stid_base &&
(chip <= CHELSIO_T5)) ?
t->stid_base + 1 : t->stid_base,
- t->stid_base + t->nstids - 1, t->stids_in_use);
+ t->stid_base + t->nstids - 1,
+ t->stids_in_use - t->v6_stids_in_use,
+ t->v6_stids_in_use);
+
if (t->natids)
seq_printf(seq, "ATID range: 0..%u, in use: %u\n",
t->natids - 1, t->atids_in_use);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
index 0ba7866c8259..26eb00a45db1 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
@@ -500,7 +500,11 @@ static int from_fw_port_mod_type(enum fw_port_type port_type,
} else if (port_type == FW_PORT_TYPE_SFP ||
port_type == FW_PORT_TYPE_QSFP_10G ||
port_type == FW_PORT_TYPE_QSA ||
- port_type == FW_PORT_TYPE_QSFP) {
+ port_type == FW_PORT_TYPE_QSFP ||
+ port_type == FW_PORT_TYPE_CR4_QSFP ||
+ port_type == FW_PORT_TYPE_CR_QSFP ||
+ port_type == FW_PORT_TYPE_CR2_QSFP ||
+ port_type == FW_PORT_TYPE_SFP28) {
if (mod_type == FW_PORT_MOD_TYPE_LR ||
mod_type == FW_PORT_MOD_TYPE_SR ||
mod_type == FW_PORT_MOD_TYPE_ER ||
@@ -511,6 +515,9 @@ static int from_fw_port_mod_type(enum fw_port_type port_type,
return PORT_DA;
else
return PORT_OTHER;
+ } else if (port_type == FW_PORT_TYPE_KR4_100G ||
+ port_type == FW_PORT_TYPE_KR_SFP28) {
+ return PORT_NONE;
}
return PORT_OTHER;
@@ -618,7 +625,21 @@ static void fw_caps_to_lmm(enum fw_port_type port_type,
case FW_PORT_TYPE_CR_QSFP:
case FW_PORT_TYPE_SFP28:
SET_LMM(FIBRE);
- SET_LMM(25000baseCR_Full);
+ FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
+ FW_CAPS_TO_LMM(SPEED_10G, 10000baseT_Full);
+ FW_CAPS_TO_LMM(SPEED_25G, 25000baseCR_Full);
+ break;
+
+ case FW_PORT_TYPE_KR_SFP28:
+ SET_LMM(Backplane);
+ FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
+ FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full);
+ FW_CAPS_TO_LMM(SPEED_25G, 25000baseKR_Full);
+ break;
+
+ case FW_PORT_TYPE_CR2_QSFP:
+ SET_LMM(FIBRE);
+ SET_LMM(50000baseSR2_Full);
break;
case FW_PORT_TYPE_KR4_100G:
@@ -674,13 +695,20 @@ static unsigned int lmm_to_fw_caps(const unsigned long *link_mode_mask)
static int get_link_ksettings(struct net_device *dev,
struct ethtool_link_ksettings *link_ksettings)
{
- const struct port_info *pi = netdev_priv(dev);
+ struct port_info *pi = netdev_priv(dev);
struct ethtool_link_settings *base = &link_ksettings->base;
ethtool_link_ksettings_zero_link_mode(link_ksettings, supported);
ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising);
ethtool_link_ksettings_zero_link_mode(link_ksettings, lp_advertising);
+	/* For now, the Firmware doesn't send up Port State changes
+	 * when the Virtual Interface attached to the Port is down. So
+	 * if it's down, poll for any changes explicitly.
+ */
+ if (!netif_running(dev))
+ (void)t4_update_port_info(pi);
+
base->port = from_fw_port_mod_type(pi->port_type, pi->mod_type);
if (pi->mdio_addr >= 0) {
@@ -1085,14 +1113,31 @@ static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
static int get_ts_info(struct net_device *dev, struct ethtool_ts_info *ts_info)
{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
+
ts_info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
SOF_TIMESTAMPING_RX_SOFTWARE |
SOF_TIMESTAMPING_SOFTWARE;
ts_info->so_timestamping |= SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
- ts_info->phc_index = -1;
+ ts_info->tx_types = (1 << HWTSTAMP_TX_OFF) |
+ (1 << HWTSTAMP_TX_ON);
+
+ ts_info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ);
+
+ if (adapter->ptp_clock)
+ ts_info->phc_index = ptp_clock_index(adapter->ptp_clock);
+ else
+ ts_info->phc_index = -1;
return 0;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
index 10736738ff30..45b5853ca2f1 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
@@ -190,7 +190,7 @@ static int del_filter_wr(struct adapter *adapter, int fidx)
if (!skb)
return -ENOMEM;
- fwr = (struct fw_filter_wr *)__skb_put(skb, len);
+ fwr = __skb_put(skb, len);
t4_mk_filtdelwr(f->tid, fwr, adapter->sge.fw_evtq.abs_id);
/* Mark the filter as "pending" and ship off the Filter Work Request.
@@ -231,8 +231,7 @@ int set_filter_wr(struct adapter *adapter, int fidx)
}
}
- fwr = (struct fw_filter_wr *)__skb_put(skb, sizeof(*fwr));
- memset(fwr, 0, sizeof(*fwr));
+ fwr = __skb_put_zero(skb, sizeof(*fwr));
/* It would be nice to put most of the following in t4_hw.c but most
* of the work is translating the cxgbtool ch_filter_specification
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 53309f659951..86f92e31e8aa 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -79,6 +79,7 @@
#include "l2t.h"
#include "sched.h"
#include "cxgb4_tc_u32.h"
+#include "cxgb4_ptp.h"
char cxgb4_driver_name[] = KBUILD_MODNAME;
@@ -824,9 +825,12 @@ static int setup_sge_queues(struct adapter *adap)
{
int err, i, j;
struct sge *s = &adap->sge;
- struct sge_uld_rxq_info *rxq_info = s->uld_rxq_info[CXGB4_ULD_RDMA];
+ struct sge_uld_rxq_info *rxq_info = NULL;
unsigned int cmplqid = 0;
+ if (is_uld(adap))
+ rxq_info = s->uld_rxq_info[CXGB4_ULD_RDMA];
+
for_each_port(adap, i) {
struct net_device *dev = adap->port[i];
struct port_info *pi = netdev_priv(dev);
@@ -840,8 +844,8 @@ static int setup_sge_queues(struct adapter *adap)
adap->msi_idx, &q->fl,
t4_ethrx_handler,
NULL,
- t4_get_mps_bg_map(adap,
- pi->tx_chan));
+ t4_get_tp_ch_map(adap,
+ pi->tx_chan));
if (err)
goto freeout;
q->rspq.idx = j;
@@ -869,6 +873,14 @@ static int setup_sge_queues(struct adapter *adap)
goto freeout;
}
+ if (!is_t4(adap->params.chip)) {
+ err = t4_sge_alloc_eth_txq(adap, &s->ptptxq, adap->port[0],
+					   netdev_get_tx_queue(adap->port[0], 0),
+					   s->fw_evtq.cntxt_id);
+ if (err)
+ goto freeout;
+ }
+
t4_write_reg(adap, is_t4(adap->params.chip) ?
MPS_TRC_RSS_CONTROL_A :
MPS_T5_TRC_RSS_CONTROL_A,
@@ -891,7 +903,7 @@ static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
* The skb's priority is determined via the VLAN Tag Priority Code
* Point field.
*/
- if (cxgb4_dcb_enabled(dev)) {
+ if (cxgb4_dcb_enabled(dev) && !is_kdump_kernel()) {
u16 vlan_tci;
int err;
@@ -1093,10 +1105,12 @@ int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
* This is equivalent to 4 TIDs. With CLIP enabled it
* needs 2 TIDs.
*/
- if (family == PF_INET)
- t->stids_in_use++;
- else
+ if (family == PF_INET6) {
t->stids_in_use += 2;
+ t->v6_stids_in_use += 2;
+ } else {
+ t->stids_in_use++;
+ }
}
spin_unlock_bh(&t->stid_lock);
return stid;
@@ -1150,13 +1164,16 @@ void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
bitmap_release_region(t->stid_bmap, stid, 1);
t->stid_tab[stid].data = NULL;
if (stid < t->nstids) {
- if (family == PF_INET)
- t->stids_in_use--;
- else
+ if (family == PF_INET6) {
t->stids_in_use -= 2;
+ t->v6_stids_in_use -= 2;
+ } else {
+ t->stids_in_use--;
+ }
} else {
t->sftids_in_use--;
}
+
spin_unlock_bh(&t->stid_lock);
}
EXPORT_SYMBOL(cxgb4_free_stid);
@@ -1170,7 +1187,7 @@ static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
struct cpl_tid_release *req;
set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
- req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
+ req = __skb_put(skb, sizeof(*req));
INIT_TP_WR(req, tid);
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
}
@@ -1232,7 +1249,8 @@ static void process_tid_release_list(struct work_struct *work)
* Release a TID and inform HW. If we are unable to allocate the release
* message we defer to a work queue.
*/
-void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid)
+void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid,
+ unsigned short family)
{
struct sk_buff *skb;
struct adapter *adap = container_of(t, struct adapter, tids);
@@ -1241,10 +1259,18 @@ void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid)
if (t->tid_tab[tid]) {
t->tid_tab[tid] = NULL;
- if (t->hash_base && (tid >= t->hash_base))
- atomic_dec(&t->hash_tids_in_use);
- else
- atomic_dec(&t->tids_in_use);
+ atomic_dec(&t->conns_in_use);
+ if (t->hash_base && (tid >= t->hash_base)) {
+ if (family == AF_INET6)
+ atomic_sub(2, &t->hash_tids_in_use);
+ else
+ atomic_dec(&t->hash_tids_in_use);
+ } else {
+ if (family == AF_INET6)
+ atomic_sub(2, &t->tids_in_use);
+ else
+ atomic_dec(&t->tids_in_use);
+ }
}
skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
@@ -1292,10 +1318,12 @@ static int tid_init(struct tid_info *t)
spin_lock_init(&t->ftid_lock);
t->stids_in_use = 0;
+ t->v6_stids_in_use = 0;
t->sftids_in_use = 0;
t->afree = NULL;
t->atids_in_use = 0;
atomic_set(&t->tids_in_use, 0);
+ atomic_set(&t->conns_in_use, 0);
atomic_set(&t->hash_tids_in_use, 0);
/* Setup the free list for atid_tab and clear the stid bitmap. */
@@ -1343,7 +1371,7 @@ int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
return -ENOMEM;
adap = netdev2adap(dev);
- req = (struct cpl_pass_open_req *)__skb_put(skb, sizeof(*req));
+ req = __skb_put(skb, sizeof(*req));
INIT_TP_WR(req, 0);
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
req->local_port = sport;
@@ -1384,7 +1412,7 @@ int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
return -ENOMEM;
adap = netdev2adap(dev);
- req = (struct cpl_pass_open_req6 *)__skb_put(skb, sizeof(*req));
+ req = __skb_put(skb, sizeof(*req));
INIT_TP_WR(req, 0);
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6, stid));
req->local_port = sport;
@@ -1416,7 +1444,7 @@ int cxgb4_remove_server(const struct net_device *dev, unsigned int stid,
if (!skb)
return -ENOMEM;
- req = (struct cpl_close_listsvr_req *)__skb_put(skb, sizeof(*req));
+ req = __skb_put(skb, sizeof(*req));
INIT_TP_WR(req, 0);
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, stid));
req->reply_ctrl = htons(NO_REPLY_V(0) | (ipv6 ? LISTSVR_IPV6_V(1) :
@@ -2251,6 +2279,13 @@ static int cxgb_open(struct net_device *dev)
return err;
}
+ /* It's possible that the basic port information could have
+ * changed since we first read it.
+ */
+ err = t4_update_port_info(pi);
+ if (err < 0)
+ return err;
+
err = link_start(dev);
if (!err)
netif_tx_start_all_queues(dev);
@@ -2412,6 +2447,7 @@ static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
unsigned int mbox;
int ret = 0, prtad, devad;
struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;
switch (cmd) {
@@ -2449,18 +2485,69 @@ static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
sizeof(pi->tstamp_config)))
return -EFAULT;
- switch (pi->tstamp_config.rx_filter) {
- case HWTSTAMP_FILTER_NONE:
+ if (!is_t4(adapter->params.chip)) {
+ switch (pi->tstamp_config.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ case HWTSTAMP_TX_ON:
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (pi->tstamp_config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ pi->rxtstamp = false;
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ cxgb4_ptprx_timestamping(pi, pi->port_id,
+ PTP_TS_L4);
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ cxgb4_ptprx_timestamping(pi, pi->port_id,
+ PTP_TS_L2_L4);
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ pi->rxtstamp = true;
+ break;
+ default:
+ pi->tstamp_config.rx_filter =
+ HWTSTAMP_FILTER_NONE;
+ return -ERANGE;
+ }
+
+ if ((pi->tstamp_config.tx_type == HWTSTAMP_TX_OFF) &&
+ (pi->tstamp_config.rx_filter ==
+ HWTSTAMP_FILTER_NONE)) {
+ if (cxgb4_ptp_txtype(adapter, pi->port_id) >= 0)
+ pi->ptp_enable = false;
+ }
+
+ if (pi->tstamp_config.rx_filter !=
+ HWTSTAMP_FILTER_NONE) {
+ if (cxgb4_ptp_redirect_rx_packet(adapter,
+ pi) >= 0)
+ pi->ptp_enable = true;
+ }
+ } else {
+ /* For T4 Adapters */
+ switch (pi->tstamp_config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
pi->rxtstamp = false;
break;
- case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_ALL:
pi->rxtstamp = true;
break;
- default:
- pi->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
+ default:
+ pi->tstamp_config.rx_filter =
+ HWTSTAMP_FILTER_NONE;
return -ERANGE;
+ }
}
-
return copy_to_user(req->ifr_data, &pi->tstamp_config,
sizeof(pi->tstamp_config)) ?
-EFAULT : 0;
@@ -2562,6 +2649,8 @@ static int cxgb_get_vf_config(struct net_device *dev,
if (vf >= adap->num_vfs)
return -EINVAL;
ivi->vf = vf;
+ ivi->max_tx_rate = adap->vfinfo[vf].tx_rate;
+ ivi->min_tx_rate = 0;
ether_addr_copy(ivi->mac, adap->vfinfo[vf].vf_mac_addr);
return 0;
}
@@ -2578,6 +2667,109 @@ static int cxgb_get_phys_port_id(struct net_device *dev,
return 0;
}
+static int cxgb_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
+ int max_tx_rate)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adap = pi->adapter;
+ struct fw_port_cmd port_cmd, port_rpl;
+ u32 link_status, speed = 0;
+ u32 fw_pfvf, fw_class;
+ int class_id = vf;
+ int link_ok, ret;
+ u16 pktsize;
+
+ if (vf >= adap->num_vfs)
+ return -EINVAL;
+
+ if (min_tx_rate) {
+ dev_err(adap->pdev_dev,
+ "Min tx rate (%d) (> 0) for VF %d is Invalid.\n",
+ min_tx_rate, vf);
+ return -EINVAL;
+ }
+ /* Retrieve link details for VF port */
+ memset(&port_cmd, 0, sizeof(port_cmd));
+ port_cmd.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_READ_F |
+ FW_PORT_CMD_PORTID_V(pi->port_id));
+ port_cmd.action_to_len16 =
+ cpu_to_be32(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_GET_PORT_INFO) |
+ FW_LEN16(port_cmd));
+ ret = t4_wr_mbox(adap, adap->mbox, &port_cmd, sizeof(port_cmd),
+ &port_rpl);
+ if (ret != FW_SUCCESS) {
+ dev_err(adap->pdev_dev,
+ "Failed to get link status for VF %d\n", vf);
+ return -EINVAL;
+ }
+ link_status = be32_to_cpu(port_rpl.u.info.lstatus_to_modtype);
+ link_ok = (link_status & FW_PORT_CMD_LSTATUS_F) != 0;
+ if (!link_ok) {
+ dev_err(adap->pdev_dev, "Link down for VF %d\n", vf);
+ return -EINVAL;
+ }
+ /* Determine link speed */
+ if (link_status & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M))
+ speed = 100;
+ else if (link_status & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G))
+ speed = 1000;
+ else if (link_status & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G))
+ speed = 10000;
+ else if (link_status & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_25G))
+ speed = 25000;
+ else if (link_status & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G))
+ speed = 40000;
+ else if (link_status & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100G))
+ speed = 100000;
+
+ if (max_tx_rate > speed) {
+ dev_err(adap->pdev_dev,
+ "Max tx rate %d for VF %d can't be > link-speed %u",
+ max_tx_rate, vf, speed);
+ return -EINVAL;
+ }
+ pktsize = be16_to_cpu(port_rpl.u.info.mtu);
+	/* subtract ethhdr size and 4 bytes crc since f/w appends them */
+ pktsize = pktsize - sizeof(struct ethhdr) - 4;
+	/* subtract IPv4 and TCP header sizes to get a typical IPv4 MSS */
+ pktsize = pktsize - sizeof(struct iphdr) - sizeof(struct tcphdr);
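+	/* Illustrative arithmetic only (assumes the firmware reports the full
+	 * on-wire frame size): 1518 - 14 - 4 - 20 - 20 = 1460, the classic
+	 * IPv4 TCP MSS.
+	 */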
+ /* configure Traffic Class for rate-limiting */
+ ret = t4_sched_params(adap, SCHED_CLASS_TYPE_PACKET,
+ SCHED_CLASS_LEVEL_CL_RL,
+ SCHED_CLASS_MODE_CLASS,
+ SCHED_CLASS_RATEUNIT_BITS,
+ SCHED_CLASS_RATEMODE_ABS,
+ pi->port_id, class_id, 0,
+ max_tx_rate * 1000, 0, pktsize);
+ if (ret) {
+ dev_err(adap->pdev_dev, "Err %d for Traffic Class config\n",
+ ret);
+ return -EINVAL;
+ }
+ dev_info(adap->pdev_dev,
+ "Class %d with MSS %u configured with rate %u\n",
+ class_id, pktsize, max_tx_rate);
+
+ /* bind VF to configured Traffic Class */
+ fw_pfvf = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) |
+ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_SCHEDCLASS_ETH));
+ fw_class = class_id;
+ ret = t4_set_params(adap, adap->mbox, adap->pf, vf + 1, 1, &fw_pfvf,
+ &fw_class);
+ if (ret) {
+ dev_err(adap->pdev_dev,
+ "Err %d in binding VF %d to Traffic Class %d\n",
+ ret, vf, class_id);
+ return -EINVAL;
+ }
+ dev_info(adap->pdev_dev, "PF %d VF %d is bound to Class %d\n",
+ adap->pf, vf, class_id);
+ adap->vfinfo[vf].tx_rate = max_tx_rate;
+ return 0;
+}
+
#endif
static int cxgb_set_mac_addr(struct net_device *dev, void *p)
@@ -2697,12 +2889,15 @@ static int cxgb_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
return err;
}
-static int cxgb_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
- struct tc_to_netdev *tc)
+static int cxgb_setup_tc(struct net_device *dev, u32 handle, u32 chain_index,
+ __be16 proto, struct tc_to_netdev *tc)
{
struct port_info *pi = netdev2pinfo(dev);
struct adapter *adap = netdev2adap(dev);
+ if (chain_index)
+ return -EOPNOTSUPP;
+
if (!(adap->flags & FULL_INIT_DONE)) {
dev_err(adap->pdev_dev,
"Failed to setup tc on port %d. Link Down?\n",
@@ -2726,6 +2921,16 @@ static int cxgb_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
return -EOPNOTSUPP;
}
+static netdev_features_t cxgb_fix_features(struct net_device *dev,
+ netdev_features_t features)
+{
+ /* Disable GRO, if RX_CSUM is disabled */
+ if (!(features & NETIF_F_RXCSUM))
+ features &= ~NETIF_F_GRO;
+
+ return features;
+}
+
static const struct net_device_ops cxgb4_netdev_ops = {
.ndo_open = cxgb_open,
.ndo_stop = cxgb_close,
@@ -2747,6 +2952,7 @@ static const struct net_device_ops cxgb4_netdev_ops = {
#endif /* CONFIG_CHELSIO_T4_FCOE */
.ndo_set_tx_maxrate = cxgb_set_tx_maxrate,
.ndo_setup_tc = cxgb_setup_tc,
+ .ndo_fix_features = cxgb_fix_features,
};
#ifdef CONFIG_PCI_IOV
@@ -2754,6 +2960,7 @@ static const struct net_device_ops cxgb4_mgmt_netdev_ops = {
.ndo_open = dummy_open,
.ndo_set_vf_mac = cxgb_set_vf_mac,
.ndo_get_vf_config = cxgb_get_vf_config,
+ .ndo_set_vf_rate = cxgb_set_vf_rate,
.ndo_get_phys_port_id = cxgb_get_phys_port_id,
};
#endif
@@ -4018,10 +4225,7 @@ static void cfg_queues(struct adapter *adap)
/* Reduce memory usage in kdump environment, disable all offload.
*/
- if (is_kdump_kernel()) {
- adap->params.offload = 0;
- adap->params.crypto = 0;
- } else if (is_uld(adap) && t4_uld_mem_alloc(adap)) {
+ if (is_kdump_kernel() || (is_uld(adap) && t4_uld_mem_alloc(adap))) {
adap->params.offload = 0;
adap->params.crypto = 0;
}
@@ -4042,7 +4246,7 @@ static void cfg_queues(struct adapter *adap)
struct port_info *pi = adap2pinfo(adap, i);
pi->first_qset = qidx;
- pi->nqsets = 8;
+ pi->nqsets = is_kdump_kernel() ? 1 : 8;
qidx += pi->nqsets;
}
#else /* !CONFIG_CHELSIO_T4_DCB */
@@ -4055,6 +4259,9 @@ static void cfg_queues(struct adapter *adap)
if (q10g > netif_get_num_default_rss_queues())
q10g = netif_get_num_default_rss_queues();
+ if (is_kdump_kernel())
+ q10g = 1;
+
for_each_port(adap, i) {
struct port_info *pi = adap2pinfo(adap, i);
@@ -4094,6 +4301,9 @@ static void cfg_queues(struct adapter *adap)
for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
s->ctrlq[i].q.size = 512;
+ if (!is_t4(adap->params.chip))
+ s->ptptxq.q.size = 8;
+
init_rspq(adap, &s->fw_evtq, 0, 1, 1024, 64);
init_rspq(adap, &s->intrq, 0, 1, 512, 64);
}
@@ -4960,6 +5170,8 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets);
netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets);
+ netif_carrier_off(adapter->port[i]);
+
err = register_netdev(adapter->port[i]);
if (err)
break;
@@ -4990,6 +5202,9 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
mutex_unlock(&uld_mutex);
}
+ if (!is_t4(adapter->params.chip))
+ cxgb4_ptp_init(adapter);
+
print_adapter_info(adapter);
setup_fw_sge_queues(adapter);
return 0;
@@ -5026,13 +5241,15 @@ sriov:
&v, &port_vec);
if (err < 0) {
dev_err(adapter->pdev_dev, "Could not fetch port params\n");
- goto free_adapter;
+ goto free_mbox_log;
}
adapter->params.nports = hweight32(port_vec);
pci_set_drvdata(pdev, adapter);
return 0;
+free_mbox_log:
+ kfree(adapter->mbox_log);
free_adapter:
kfree(adapter);
free_pci_region:
@@ -5097,6 +5314,9 @@ static void remove_one(struct pci_dev *pdev)
debugfs_remove_recursive(adapter->debugfs_root);
+ if (!is_t4(adapter->params.chip))
+ cxgb4_ptp_stop(adapter);
+
/* If we allocated filters, free up state associated with any
* valid filters ...
*/
@@ -5132,6 +5352,7 @@ static void remove_one(struct pci_dev *pdev)
unregister_netdev(adapter->port[0]);
iounmap(adapter->regs);
kfree(adapter->vfinfo);
+ kfree(adapter->mbox_log);
kfree(adapter);
pci_disable_sriov(pdev);
pci_release_regions(pdev);
@@ -5178,6 +5399,7 @@ static void shutdown_one(struct pci_dev *pdev)
unregister_netdev(adapter->port[0]);
iounmap(adapter->regs);
kfree(adapter->vfinfo);
+ kfree(adapter->mbox_log);
kfree(adapter);
pci_disable_sriov(pdev);
pci_release_regions(pdev);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c
new file mode 100644
index 000000000000..50517cfd9671
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c
@@ -0,0 +1,475 @@
+/*
+ * cxgb4_ptp.c: Chelsio PTP support for T5/T6
+ *
+ * Copyright (c) 2003-2017 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Written by: Atul Gupta (atul.gupta@chelsio.com)
+ */
+
+#include <linux/module.h>
+#include <linux/net_tstamp.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/pps_kernel.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/ptp_classify.h>
+#include <linux/udp.h>
+
+#include "cxgb4.h"
+#include "t4_hw.h"
+#include "t4_regs.h"
+#include "t4_msg.h"
+#include "t4fw_api.h"
+#include "cxgb4_ptp.h"
+
+/**
+ * cxgb4_ptp_is_ptp_tx - determine whether TX packet is PTP or not
+ * @skb: skb of outgoing ptp request
+ *
+ */
+bool cxgb4_ptp_is_ptp_tx(struct sk_buff *skb)
+{
+ struct udphdr *uh;
+
+ uh = udp_hdr(skb);
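+	/* PTP event messages are IPv4/UDP datagrams sent to UDP port 319 */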
+ return skb->len >= PTP_MIN_LENGTH &&
+ skb->len <= PTP_IN_TRANSMIT_PACKET_MAXNUM &&
+ likely(skb->protocol == htons(ETH_P_IP)) &&
+ ip_hdr(skb)->protocol == IPPROTO_UDP &&
+ uh->dest == htons(PTP_EVENT_PORT);
+}
+
+bool is_ptp_enabled(struct sk_buff *skb, struct net_device *dev)
+{
+ struct port_info *pi;
+
+ pi = netdev_priv(dev);
+ return (pi->ptp_enable && cxgb4_xmit_with_hwtstamp(skb) &&
+ cxgb4_ptp_is_ptp_tx(skb));
+}
+
+/**
+ * cxgb4_ptp_is_ptp_rx - determine whether RX packet is PTP or not
+ * @skb: skb of incoming ptp request
+ *
+ */
+bool cxgb4_ptp_is_ptp_rx(struct sk_buff *skb)
+{
+ struct udphdr *uh = (struct udphdr *)(skb->data + ETH_HLEN +
+ IPV4_HLEN(skb->data));
+
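+	/* Event messages carry UDP port 319 as both source and destination */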
+ return uh->dest == htons(PTP_EVENT_PORT) &&
+ uh->source == htons(PTP_EVENT_PORT);
+}
+
+/**
+ * cxgb4_ptp_read_hwstamp - read timestamp for TX event PTP message
+ * @adapter: board private structure
+ * @pi: port private structure
+ *
+ */
+void cxgb4_ptp_read_hwstamp(struct adapter *adapter, struct port_info *pi)
+{
+ struct skb_shared_hwtstamps *skb_ts = NULL;
+ u64 tx_ts;
+
+ skb_ts = skb_hwtstamps(adapter->ptp_tx_skb);
+
+ tx_ts = t4_read_reg(adapter,
+ T5_PORT_REG(pi->port_id, MAC_PORT_TX_TS_VAL_LO));
+
+ tx_ts |= (u64)t4_read_reg(adapter,
+ T5_PORT_REG(pi->port_id,
+ MAC_PORT_TX_TS_VAL_HI)) << 32;
+ skb_ts->hwtstamp = ns_to_ktime(tx_ts);
+ skb_tstamp_tx(adapter->ptp_tx_skb, skb_ts);
+ dev_kfree_skb_any(adapter->ptp_tx_skb);
+ spin_lock(&adapter->ptp_lock);
+ adapter->ptp_tx_skb = NULL;
+ spin_unlock(&adapter->ptp_lock);
+}
+
+/**
+ * cxgb4_ptprx_timestamping - Enable Timestamp for RX PTP event message
+ * @pi: port private structure
+ * @port: port number
+ * @mode: RX mode
+ *
+ */
+int cxgb4_ptprx_timestamping(struct port_info *pi, u8 port, u16 mode)
+{
+ struct adapter *adapter = pi->adapter;
+ struct fw_ptp_cmd c;
+ int err;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PTP_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_WRITE_F |
+ FW_PTP_CMD_PORTID_V(port));
+ c.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(sizeof(c) / 16));
+ c.u.init.sc = FW_PTP_SC_RXTIME_STAMP;
+ c.u.init.mode = cpu_to_be16(mode);
+
+ err = t4_wr_mbox(adapter, adapter->mbox, &c, sizeof(c), NULL);
+ if (err < 0)
+ dev_err(adapter->pdev_dev,
+ "PTP: %s error %d\n", __func__, -err);
+ return err;
+}
+
+int cxgb4_ptp_txtype(struct adapter *adapter, u8 port)
+{
+ struct fw_ptp_cmd c;
+ int err;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PTP_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_WRITE_F |
+ FW_PTP_CMD_PORTID_V(port));
+ c.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(sizeof(c) / 16));
+ c.u.init.sc = FW_PTP_SC_TX_TYPE;
+ c.u.init.mode = cpu_to_be16(PTP_TS_NONE);
+
+ err = t4_wr_mbox(adapter, adapter->mbox, &c, sizeof(c), NULL);
+ if (err < 0)
+ dev_err(adapter->pdev_dev,
+ "PTP: %s error %d\n", __func__, -err);
+
+ return err;
+}
+
+int cxgb4_ptp_redirect_rx_packet(struct adapter *adapter, struct port_info *pi)
+{
+ struct sge *s = &adapter->sge;
+ struct sge_eth_rxq *receive_q = &s->ethrxq[pi->first_qset];
+ struct fw_ptp_cmd c;
+ int err;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PTP_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_WRITE_F |
+ FW_PTP_CMD_PORTID_V(pi->port_id));
+
+ c.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(sizeof(c) / 16));
+ c.u.init.sc = FW_PTP_SC_RDRX_TYPE;
+ c.u.init.txchan = pi->tx_chan;
+ c.u.init.absid = cpu_to_be16(receive_q->rspq.abs_id);
+
+ err = t4_wr_mbox(adapter, adapter->mbox, &c, sizeof(c), NULL);
+ if (err < 0)
+ dev_err(adapter->pdev_dev,
+ "PTP: %s error %d\n", __func__, -err);
+ return err;
+}
+
+/**
+ * cxgb4_ptp_adjfreq - Adjust the frequency of the hardware clock
+ * @ptp: ptp clock structure
+ * @ppb: Desired frequency change in parts per billion
+ *
+ * Adjust the frequency of the PHC cycle counter by the indicated ppb from
+ * the base frequency.
+ */
+static int cxgb4_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+{
+ struct adapter *adapter = (struct adapter *)container_of(ptp,
+ struct adapter, ptp_clock_info);
+ struct fw_ptp_cmd c;
+ int err;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PTP_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_WRITE_F |
+ FW_PTP_CMD_PORTID_V(0));
+ c.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(sizeof(c) / 16));
+ c.u.ts.sc = FW_PTP_SC_ADJ_FREQ;
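+	/* The firmware takes the magnitude plus a separate sign flag */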
+ c.u.ts.sign = (ppb < 0) ? 1 : 0;
+ if (ppb < 0)
+ ppb = -ppb;
+ c.u.ts.ppb = cpu_to_be32(ppb);
+
+ err = t4_wr_mbox(adapter, adapter->mbox, &c, sizeof(c), NULL);
+ if (err < 0)
+ dev_err(adapter->pdev_dev,
+ "PTP: %s error %d\n", __func__, -err);
+
+ return err;
+}
+
+/**
+ * cxgb4_ptp_fineadjtime - Shift the time of the hardware clock
+ * @adapter: board private structure
+ * @delta: Desired change in nanoseconds
+ *
+ * Apply a fine adjustment of @delta nanoseconds via the firmware.
+ */
+static int cxgb4_ptp_fineadjtime(struct adapter *adapter, s64 delta)
+{
+ struct fw_ptp_cmd c;
+ int err;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PTP_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_WRITE_F |
+ FW_PTP_CMD_PORTID_V(0));
+ c.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(sizeof(c) / 16));
+ c.u.ts.sc = FW_PTP_SC_ADJ_FTIME;
+ c.u.ts.tm = cpu_to_be64(delta);
+
+ err = t4_wr_mbox(adapter, adapter->mbox, &c, sizeof(c), NULL);
+ if (err < 0)
+ dev_err(adapter->pdev_dev,
+ "PTP: %s error %d\n", __func__, -err);
+ return err;
+}
+
+/**
+ * cxgb4_ptp_adjtime - Shift the time of the hardware clock
+ * @ptp: ptp clock structure
+ * @delta: Desired change in nanoseconds
+ *
+ * Shift the hardware clock by @delta nanoseconds.
+ */
+static int cxgb4_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct adapter *adapter =
+ (struct adapter *)container_of(ptp, struct adapter,
+ ptp_clock_info);
+ struct fw_ptp_cmd c;
+ s64 sign = 1;
+ int err;
+
+ if (delta < 0)
+ sign = -1;
+
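+	/* Large steps go through the firmware's coarse ADJ_TIME command;
+	 * small ones use the fine-adjust path below.
+	 */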
+ if (delta * sign > PTP_CLOCK_MAX_ADJTIME) {
+ memset(&c, 0, sizeof(c));
+ c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PTP_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_WRITE_F |
+ FW_PTP_CMD_PORTID_V(0));
+ c.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(sizeof(c) / 16));
+ c.u.ts.sc = FW_PTP_SC_ADJ_TIME;
+ c.u.ts.sign = (delta < 0) ? 1 : 0;
+ if (delta < 0)
+ delta = -delta;
+ c.u.ts.tm = cpu_to_be64(delta);
+
+ err = t4_wr_mbox(adapter, adapter->mbox, &c, sizeof(c), NULL);
+ if (err < 0)
+ dev_err(adapter->pdev_dev,
+ "PTP: %s error %d\n", __func__, -err);
+ } else {
+ err = cxgb4_ptp_fineadjtime(adapter, delta);
+ }
+
+ return err;
+}
+
+/**
+ * cxgb4_ptp_gettime - Reads the current time from the hardware clock
+ * @ptp: ptp clock structure
+ * @ts: timespec structure to hold the current time value
+ *
+ * Read the timecounter and return the correct value in ns after converting
+ * it into a struct timespec.
+ */
+static int cxgb4_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
+{
+ struct adapter *adapter = (struct adapter *)container_of(ptp,
+ struct adapter, ptp_clock_info);
+ struct fw_ptp_cmd c;
+ u64 ns;
+ int err;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PTP_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_READ_F |
+ FW_PTP_CMD_PORTID_V(0));
+ c.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(sizeof(c) / 16));
+ c.u.ts.sc = FW_PTP_SC_GET_TIME;
+
+ err = t4_wr_mbox(adapter, adapter->mbox, &c, sizeof(c), &c);
+ if (err < 0) {
+ dev_err(adapter->pdev_dev,
+ "PTP: %s error %d\n", __func__, -err);
+ return err;
+ }
+
+	/* convert to timespec */
+ ns = be64_to_cpu(c.u.ts.tm);
+ *ts = ns_to_timespec64(ns);
+
+ return err;
+}
+
+/**
+ * cxgb4_ptp_settime - Set the current time on the hardware clock
+ * @ptp: ptp clock structure
+ * @ts: timespec containing the new time for the cycle counter
+ *
+ * Reset the hardware clock to the new base value instead of the
+ * kernel wall clock time.
+ */
+static int cxgb4_ptp_settime(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct adapter *adapter = (struct adapter *)container_of(ptp,
+ struct adapter, ptp_clock_info);
+ struct fw_ptp_cmd c;
+ u64 ns;
+ int err;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PTP_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_WRITE_F |
+ FW_PTP_CMD_PORTID_V(0));
+ c.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(sizeof(c) / 16));
+ c.u.ts.sc = FW_PTP_SC_SET_TIME;
+
+ ns = timespec64_to_ns(ts);
+ c.u.ts.tm = cpu_to_be64(ns);
+
+ err = t4_wr_mbox(adapter, adapter->mbox, &c, sizeof(c), NULL);
+ if (err < 0)
+ dev_err(adapter->pdev_dev,
+ "PTP: %s error %d\n", __func__, -err);
+
+ return err;
+}
+
+static void cxgb4_init_ptp_timer(struct adapter *adapter)
+{
+ struct fw_ptp_cmd c;
+ int err;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PTP_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_WRITE_F |
+ FW_PTP_CMD_PORTID_V(0));
+ c.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(sizeof(c) / 16));
+ c.u.scmd.sc = FW_PTP_SC_INIT_TIMER;
+
+ err = t4_wr_mbox(adapter, adapter->mbox, &c, sizeof(c), NULL);
+ if (err < 0)
+ dev_err(adapter->pdev_dev,
+ "PTP: %s error %d\n", __func__, -err);
+}
+
+/**
+ * cxgb4_ptp_enable - enable or disable an ancillary feature
+ * @ptp: ptp clock structure
+ * @request: Desired resource to enable or disable
+ * @on: Caller passes one to enable or zero to disable
+ *
+ * Enable (or disable) ancillary features of the PHC subsystem.
+ * Currently, no ancillary features are supported.
+ */
+static int cxgb4_ptp_enable(struct ptp_clock_info __always_unused *ptp,
+ struct ptp_clock_request __always_unused *request,
+ int __always_unused on)
+{
+ return -ENOTSUPP;
+}
+
+static const struct ptp_clock_info cxgb4_ptp_clock_info = {
+ .owner = THIS_MODULE,
+ .name = "cxgb4_clock",
+ .max_adj = MAX_PTP_FREQ_ADJ,
+ .n_alarm = 0,
+ .n_ext_ts = 0,
+ .n_per_out = 0,
+ .pps = 0,
+ .adjfreq = cxgb4_ptp_adjfreq,
+ .adjtime = cxgb4_ptp_adjtime,
+ .gettime64 = cxgb4_ptp_gettime,
+ .settime64 = cxgb4_ptp_settime,
+ .enable = cxgb4_ptp_enable,
+};
+
+/**
+ * cxgb4_ptp_init - initialize PTP for devices which support it
+ * @adapter: board private structure
+ *
+ * This function performs the required steps for enabling PTP support.
+ */
+void cxgb4_ptp_init(struct adapter *adapter)
+{
+ struct timespec64 now;
+ /* no need to create a clock device if we already have one */
+ if (!IS_ERR_OR_NULL(adapter->ptp_clock))
+ return;
+
+ adapter->ptp_tx_skb = NULL;
+ adapter->ptp_clock_info = cxgb4_ptp_clock_info;
+ spin_lock_init(&adapter->ptp_lock);
+
+ adapter->ptp_clock = ptp_clock_register(&adapter->ptp_clock_info,
+ &adapter->pdev->dev);
+	if (IS_ERR_OR_NULL(adapter->ptp_clock)) {
+		adapter->ptp_clock = NULL;
+		dev_err(adapter->pdev_dev,
+			"PTP %s Clock registration has failed\n", __func__);
+		return;
+	}
+
+ now = ktime_to_timespec64(ktime_get_real());
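+	/* Initialize the timer block, then seed the hardware clock from system time */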
+ cxgb4_init_ptp_timer(adapter);
+ if (cxgb4_ptp_settime(&adapter->ptp_clock_info, &now) < 0) {
+ ptp_clock_unregister(adapter->ptp_clock);
+ adapter->ptp_clock = NULL;
+ }
+}
+
+/**
+ * cxgb4_ptp_stop - stop PTP support
+ * @adapter: board private structure
+ *
+ * Free any pending Tx timestamp skb and unregister the PTP clock.
+ */
+void cxgb4_ptp_stop(struct adapter *adapter)
+{
+ if (adapter->ptp_tx_skb) {
+ dev_kfree_skb_any(adapter->ptp_tx_skb);
+ adapter->ptp_tx_skb = NULL;
+ }
+
+ if (adapter->ptp_clock) {
+ ptp_clock_unregister(adapter->ptp_clock);
+ adapter->ptp_clock = NULL;
+ }
+}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.h
new file mode 100644
index 000000000000..cccfae84bb84
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.h
@@ -0,0 +1,74 @@
+/*
+ * This file is part of the Chelsio T4 Ethernet driver for Linux.
+ *
+ * Copyright (c) 2003-2017 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __CXGB4_PTP_H__
+#define __CXGB4_PTP_H__
+
+/* Maximum parts-per-billion adjustment that is acceptable */
+#define MAX_PTP_FREQ_ADJ 1000000
+#define PTP_CLOCK_MAX_ADJTIME 10000000 /* 10 ms */
+
+#define PTP_MIN_LENGTH 63
+#define PTP_IN_TRANSMIT_PACKET_MAXNUM 240
+#define PTP_EVENT_PORT 319
+
+enum ptp_rx_filter_mode {
+ PTP_TS_NONE = 0,
+ PTP_TS_L2,
+ PTP_TS_L4,
+ PTP_TS_L2_L4
+};
+
+struct port_info;
+
+static inline bool cxgb4_xmit_with_hwtstamp(struct sk_buff *skb)
+{
+ return skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP;
+}
+
+static inline void cxgb4_xmit_hwtstamp_pending(struct sk_buff *skb)
+{
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+}
+
+void cxgb4_ptp_init(struct adapter *adap);
+void cxgb4_ptp_stop(struct adapter *adap);
+bool cxgb4_ptp_is_ptp_tx(struct sk_buff *skb);
+bool cxgb4_ptp_is_ptp_rx(struct sk_buff *skb);
+int cxgb4_ptprx_timestamping(struct port_info *pi, u8 port, u16 mode);
+int cxgb4_ptp_redirect_rx_packet(struct adapter *adap, struct port_info *pi);
+int cxgb4_ptp_txtype(struct adapter *adap, u8 port_id);
+void cxgb4_ptp_read_hwstamp(struct adapter *adap, struct port_info *pi);
+bool is_ptp_enabled(struct sk_buff *skb, struct net_device *dev);
+#endif /* __CXGB4_PTP_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index 8f1c874cfe21..84541fce94c5 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -123,12 +123,14 @@ struct tid_info {
spinlock_t stid_lock;
unsigned int stids_in_use;
+ unsigned int v6_stids_in_use;
unsigned int sftids_in_use;
/* TIDs in the TCAM */
atomic_t tids_in_use;
/* TIDs in the HASH */
atomic_t hash_tids_in_use;
+ atomic_t conns_in_use;
/* lock for setting/clearing filter bitmap */
spinlock_t ftid_lock;
};
@@ -157,13 +159,21 @@ static inline void *lookup_stid(const struct tid_info *t, unsigned int stid)
}
static inline void cxgb4_insert_tid(struct tid_info *t, void *data,
- unsigned int tid)
+ unsigned int tid, unsigned short family)
{
t->tid_tab[tid] = data;
- if (t->hash_base && (tid >= t->hash_base))
- atomic_inc(&t->hash_tids_in_use);
- else
- atomic_inc(&t->tids_in_use);
+ if (t->hash_base && (tid >= t->hash_base)) {
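+ /* an IPv6 connection occupies two TID entries, so count it twice */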
+ if (family == AF_INET6)
+ atomic_add(2, &t->hash_tids_in_use);
+ else
+ atomic_inc(&t->hash_tids_in_use);
+ } else {
+ if (family == AF_INET6)
+ atomic_add(2, &t->tids_in_use);
+ else
+ atomic_inc(&t->tids_in_use);
+ }
+ atomic_inc(&t->conns_in_use);
}
int cxgb4_alloc_atid(struct tid_info *t, void *data);
@@ -171,8 +181,8 @@ int cxgb4_alloc_stid(struct tid_info *t, int family, void *data);
int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data);
void cxgb4_free_atid(struct tid_info *t, unsigned int atid);
void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family);
-void cxgb4_remove_tid(struct tid_info *t, unsigned int qid, unsigned int tid);
-
+void cxgb4_remove_tid(struct tid_info *t, unsigned int qid, unsigned int tid,
+ unsigned short family);
struct in6_addr;
int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
index 6f3692db29af..f7ef8871dd0b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
@@ -146,7 +146,7 @@ static int write_l2e(struct adapter *adap, struct l2t_entry *e, int sync)
if (!skb)
return -ENOMEM;
- req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
+ req = __skb_put(skb, sizeof(*req));
INIT_TP_WR(req, 0);
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index f05f0d400324..ede12209f20b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -52,6 +52,7 @@
#include "t4_values.h"
#include "t4_msg.h"
#include "t4fw_api.h"
+#include "cxgb4_ptp.h"
/*
* Rx buffer size. We use largish buffers if possible but settle for single
@@ -1162,7 +1163,7 @@ cxgb_fcoe_offload(struct sk_buff *skb, struct adapter *adap,
*/
netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
- u32 wr_mid, ctrl0;
+ u32 wr_mid, ctrl0, op;
u64 cntrl, *end;
int qidx, credits;
unsigned int flits, ndesc;
@@ -1175,6 +1176,7 @@ netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
dma_addr_t addr[MAX_SKB_FRAGS + 1];
bool immediate = false;
int len, max_pkt_len;
+ bool ptp_enabled = is_ptp_enabled(skb, dev);
#ifdef CONFIG_CHELSIO_T4_FCOE
int err;
#endif /* CONFIG_CHELSIO_T4_FCOE */
@@ -1198,15 +1200,31 @@ out_free: dev_kfree_skb_any(skb);
pi = netdev_priv(dev);
adap = pi->adapter;
qidx = skb_get_queue_mapping(skb);
- q = &adap->sge.ethtxq[qidx + pi->first_qset];
+ if (ptp_enabled) {
+ spin_lock(&adap->ptp_lock);
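+ /* Only one PTP Tx timestamp request may be outstanding at a
+ * time; drop this packet if one is already pending.
+ */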
+ if (!(adap->ptp_tx_skb)) {
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ adap->ptp_tx_skb = skb_get(skb);
+ } else {
+ spin_unlock(&adap->ptp_lock);
+ goto out_free;
+ }
+ q = &adap->sge.ptptxq;
+ } else {
+ q = &adap->sge.ethtxq[qidx + pi->first_qset];
+ }
+ skb_tx_timestamp(skb);
reclaim_completed_tx(adap, &q->q, true);
cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
#ifdef CONFIG_CHELSIO_T4_FCOE
err = cxgb_fcoe_offload(skb, adap, pi, &cntrl);
- if (unlikely(err == -ENOTSUPP))
+ if (unlikely(err == -ENOTSUPP)) {
+ if (ptp_enabled)
+ spin_unlock(&adap->ptp_lock);
goto out_free;
+ }
#endif /* CONFIG_CHELSIO_T4_FCOE */
flits = calc_tx_flits(skb);
@@ -1218,6 +1236,8 @@ out_free: dev_kfree_skb_any(skb);
dev_err(adap->pdev_dev,
"%s: Tx ring %u full while queue awake!\n",
dev->name, qidx);
+ if (ptp_enabled)
+ spin_unlock(&adap->ptp_lock);
return NETDEV_TX_BUSY;
}
@@ -1227,6 +1247,8 @@ out_free: dev_kfree_skb_any(skb);
if (!immediate &&
unlikely(map_skb(adap->pdev_dev, skb, addr) < 0)) {
q->mapping_err++;
+ if (ptp_enabled)
+ spin_unlock(&adap->ptp_lock);
goto out_free;
}
@@ -1279,7 +1301,11 @@ out_free: dev_kfree_skb_any(skb);
q->tx_cso += ssi->gso_segs;
} else {
len += sizeof(*cpl);
- wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) |
+ if (ptp_enabled)
+ op = FW_PTP_TX_PKT_WR;
+ else
+ op = FW_ETH_TX_PKT_WR;
+ wr->op_immdlen = htonl(FW_WR_OP_V(op) |
FW_WR_IMMDLEN_V(len));
cpl = (void *)(wr + 1);
if (skb->ip_summed == CHECKSUM_PARTIAL) {
@@ -1301,6 +1327,8 @@ out_free: dev_kfree_skb_any(skb);
ctrl0 = TXPKT_OPCODE_V(CPL_TX_PKT_XT) | TXPKT_INTF_V(pi->tx_chan) |
TXPKT_PF_V(adap->pf);
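+ /* have the hardware timestamp this packet on transmit */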
+ if (ptp_enabled)
+ ctrl0 |= TXPKT_TSTAMP_F;
#ifdef CONFIG_CHELSIO_T4_DCB
if (is_t4(adap->params.chip))
ctrl0 |= TXPKT_OVLAN_IDX_V(q->dcb_prio);
@@ -1332,6 +1360,8 @@ out_free: dev_kfree_skb_any(skb);
txq_advance(&q->q, ndesc);
ring_tx_db(adap, &q->q, ndesc);
+ if (ptp_enabled)
+ spin_unlock(&adap->ptp_lock);
return NETDEV_TX_OK;
}
@@ -2023,6 +2053,92 @@ static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
rxq->stats.rx_cso++;
}
+enum {
+ RX_NON_PTP_PKT = 0,
+ RX_PTP_PKT_SUC = 1,
+ RX_PTP_PKT_ERR = 2
+};
+
+/**
+ * t4_systim_to_hwstamp - read hardware time stamp
+ * @adapter: the adapter
+ * @skb: the packet
+ *
+ * Read the hardware timestamp from the MPS packet header and store it
+ * in the skb, which is then forwarded to the PTP application.
+ */
+static noinline int t4_systim_to_hwstamp(struct adapter *adapter,
+ struct sk_buff *skb)
+{
+ struct skb_shared_hwtstamps *hwtstamps;
+ struct cpl_rx_mps_pkt *cpl;
+ unsigned char *data;
+ int offset;
+
+ cpl = (struct cpl_rx_mps_pkt *)skb->data;
+ if (!(CPL_RX_MPS_PKT_TYPE_G(ntohl(cpl->op_to_r1_hi)) &
+ X_CPL_RX_MPS_PKT_TYPE_PTP))
+ return RX_PTP_PKT_ERR;
+
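+ /* The 64-bit timestamp sits right after the CPL header; strip
+ * the CPL plus two 8-byte words before passing the frame up.
+ */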
+ data = skb->data + sizeof(*cpl);
+ skb_pull(skb, 2 * sizeof(u64) + sizeof(struct cpl_rx_mps_pkt));
+ offset = ETH_HLEN + IPV4_HLEN(skb->data) + UDP_HLEN;
+ if (skb->len < offset + OFF_PTP_SEQUENCE_ID + sizeof(short))
+ return RX_PTP_PKT_ERR;
+
+ hwtstamps = skb_hwtstamps(skb);
+ memset(hwtstamps, 0, sizeof(*hwtstamps));
+ hwtstamps->hwtstamp = ns_to_ktime(be64_to_cpu(*((u64 *)data)));
+
+ return RX_PTP_PKT_SUC;
+}
+
+/**
+ * t4_rx_hststamp - Recv PTP Event Message
+ * @adapter: the adapter
+ * @rsp: the response queue descriptor holding the RX_PKT message
+ * @rxq: the ingress ethernet queue (used for drop accounting)
+ * @skb: the packet
+ *
+ * If PTP is enabled and this is an MPS packet, read its hardware timestamp.
+ */
+static int t4_rx_hststamp(struct adapter *adapter, const __be64 *rsp,
+ struct sge_eth_rxq *rxq, struct sk_buff *skb)
+{
+ int ret;
+
+ if (unlikely((*(u8 *)rsp == CPL_RX_MPS_PKT) &&
+ !is_t4(adapter->params.chip))) {
+ ret = t4_systim_to_hwstamp(adapter, skb);
+ if (ret == RX_PTP_PKT_ERR) {
+ kfree_skb(skb);
+ rxq->stats.rx_drops++;
+ }
+ return ret;
+ }
+ return RX_NON_PTP_PKT;
+}
+
+/**
+ * t4_tx_hststamp - Loopback PTP Transmit Event Message
+ * @adapter: the adapter
+ * @skb: the packet
+ * @dev: the ingress net device
+ *
+ * Read hardware timestamp for the loopback PTP Tx event message
+ */
+static int t4_tx_hststamp(struct adapter *adapter, struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct port_info *pi = netdev_priv(dev);
+
+ if (!is_t4(adapter->params.chip) && adapter->ptp_tx_skb) {
+ cxgb4_ptp_read_hwstamp(adapter, pi);
+ kfree_skb(skb);
+ return 0;
+ }
+ return 1;
+}
+
/**
* t4_ethrx_handler - process an ingress ethernet packet
* @q: the response queue that received the packet
@@ -2038,11 +2154,13 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
struct sk_buff *skb;
const struct cpl_rx_pkt *pkt;
struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
+ struct adapter *adapter = q->adap;
struct sge *s = &q->adap->sge;
int cpl_trace_pkt = is_t4(q->adap->params.chip) ?
CPL_TRACE_PKT : CPL_TRACE_PKT_T5;
u16 err_vec;
struct port_info *pi;
+ int ret = 0;
if (unlikely(*(u8 *)rsp == cpl_trace_pkt))
return handle_trace_pkt(q->adap, si);
@@ -2068,8 +2186,25 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
rxq->stats.rx_drops++;
return 0;
}
+ pi = netdev_priv(q->netdev);
+
+ /* Handle PTP Event Rx packet */
+ if (unlikely(pi->ptp_enable)) {
+ ret = t4_rx_hststamp(adapter, rsp, rxq, skb);
+ if (ret == RX_PTP_PKT_ERR)
+ return 0;
+ }
+ if (likely(!ret))
+ __skb_pull(skb, s->pktshift); /* remove ethernet header pad */
+
+ /* Handle the PTP Event Tx Loopback packet */
+ if (unlikely(pi->ptp_enable && !ret &&
+ (pkt->l2info & htonl(RXF_UDP_F)) &&
+ cxgb4_ptp_is_ptp_rx(skb))) {
+ if (!t4_tx_hststamp(adapter, skb, q->netdev))
+ return 0;
+ }
- __skb_pull(skb, s->pktshift); /* remove ethernet header padding */
skb->protocol = eth_type_trans(skb, q->netdev);
skb_record_rx_queue(skb, q->idx);
if (skb->dev->features & NETIF_F_RXHASH)
@@ -2078,7 +2213,6 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
rxq->stats.pkts++;
- pi = netdev_priv(skb->dev);
if (pi->rxtstamp)
cxgb4_sgetim_to_hwtstamp(q->adap, skb_hwtstamps(skb),
si->sgetstamp);
@@ -2502,6 +2636,20 @@ static void sge_tx_timer_cb(unsigned long data)
tasklet_schedule(&txq->qresume_tsk);
}
+ if (!is_t4(adap->params.chip)) {
+ struct sge_eth_txq *q = &s->ptptxq;
+ int avail;
+
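+ /* Reclaim completed descriptors on the dedicated PTP Tx queue;
+ * access to it is serialized by ptp_lock.
+ */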
+ spin_lock(&adap->ptp_lock);
+ avail = reclaimable(&q->q);
+
+ if (avail) {
+ free_tx_desc(adap, &q->q, avail, false);
+ q->q.in_use -= avail;
+ }
+ spin_unlock(&adap->ptp_lock);
+ }
+
budget = MAX_TIMER_TX_RECLAIM;
i = s->ethtxq_rover;
do {
@@ -3068,6 +3216,19 @@ void t4_free_sge_resources(struct adapter *adap)
if (adap->sge.intrq.desc)
free_rspq_fl(adap, &adap->sge.intrq, NULL);
+ if (!is_t4(adap->params.chip)) {
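+ /* free the dedicated PTP Tx queue, if one was allocated */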
+ etq = &adap->sge.ptptxq;
+ if (etq->q.desc) {
+ t4_eth_eq_free(adap, adap->mbox, adap->pf, 0,
+ etq->q.cntxt_id);
+ spin_lock_bh(&adap->ptp_lock);
+ free_tx_desc(adap, &etq->q, etq->q.in_use, true);
+ spin_unlock_bh(&adap->ptp_lock);
+ kfree(etq->q.sdesc);
+ free_txq(adap, &etq->q);
+ }
+ }
+
/* clear the reverse egress queue map */
memset(adap->sge.egr_map, 0,
adap->sge.egr_sz * sizeof(*adap->sge.egr_map));
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 3a34aa629f7d..82bf7aac6cdb 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -3540,7 +3540,7 @@ int t4_load_phy_fw(struct adapter *adap,
FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD));
val = phy_fw_size;
ret = t4_query_params_rw(adap, adap->mbox, adap->pf, 0, 1,
- &param, &val, 1);
+ &param, &val, 1, true);
if (ret < 0)
return ret;
mtype = val >> 8;
@@ -4040,6 +4040,7 @@ static void cim_intr_handler(struct adapter *adapter)
{ MBHOSTPARERR_F, "CIM mailbox host parity error", -1, 1 },
{ TIEQINPARERRINT_F, "CIM TIEQ outgoing parity error", -1, 1 },
{ TIEQOUTPARERRINT_F, "CIM TIEQ incoming parity error", -1, 1 },
+ { TIMER0INT_F, "CIM TIMER0 interrupt", -1, 1 },
{ 0 }
};
static const struct intr_info cim_upintr_info[] = {
@@ -4074,11 +4075,27 @@ static void cim_intr_handler(struct adapter *adapter)
{ 0 }
};
+ u32 val, fw_err;
int fat;
- if (t4_read_reg(adapter, PCIE_FW_A) & PCIE_FW_ERR_F)
+ fw_err = t4_read_reg(adapter, PCIE_FW_A);
+ if (fw_err & PCIE_FW_ERR_F)
t4_report_fw_error(adapter);
+ /* When the Firmware detects an internal error which normally
+ * wouldn't raise a Host Interrupt, it forces a CIM Timer0 interrupt
+ * in order to make sure the Host sees the Firmware Crash. So
+ * if we have a Timer0 interrupt and don't see a Firmware Crash,
+ * ignore the Timer0 interrupt.
+ */
+
+ val = t4_read_reg(adapter, CIM_HOST_INT_CAUSE_A);
+ if (val & TIMER0INT_F)
+ if (!(fw_err & PCIE_FW_ERR_F) ||
+ (PCIE_FW_EVAL_G(fw_err) != PCIE_FW_EVAL_CRASH))
+ t4_write_reg(adapter, CIM_HOST_INT_CAUSE_A,
+ TIMER0INT_F);
+
fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE_A,
cim_intr_info) +
t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE_A,
@@ -4445,7 +4462,7 @@ static void pl_intr_handler(struct adapter *adap)
#define PF_INTR_MASK (PFSW_F)
#define GLBL_INTR_MASK (CIM_F | MPS_F | PL_F | PCIE_F | MC_F | EDC0_F | \
EDC1_F | LE_F | TP_F | MA_F | PM_TX_F | PM_RX_F | ULP_RX_F | \
- CPL_SWITCH_F | SGE_F | ULP_TX_F)
+ CPL_SWITCH_F | SGE_F | ULP_TX_F | SF_F)
/**
* t4_slow_intr_handler - control path interrupt handler
@@ -5423,30 +5440,155 @@ void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
}
/**
- * t4_get_mps_bg_map - return the buffer groups associated with a port
+ * compute_mps_bg_map - compute the MPS Buffer Group Map for a Port
- * @adap: the adapter
+ * @adapter: the adapter
- * @idx: the port index
+ * @pidx: the port index
+ *
+ * Computes and returns a bitmap indicating which MPS buffer groups are
+ * associated with the given Port. Bit i is set if buffer group i is
+ * used by the Port.
+ */
+static inline unsigned int compute_mps_bg_map(struct adapter *adapter,
+ int pidx)
+{
+ unsigned int chip_version, nports;
+
+ chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);
+ nports = 1 << NUMPORTS_G(t4_read_reg(adapter, MPS_CMN_CTL_A));
+
+ switch (chip_version) {
+ case CHELSIO_T4:
+ case CHELSIO_T5:
+ switch (nports) {
+ case 1: return 0xf;
+ case 2: return 3 << (2 * pidx);
+ case 4: return 1 << pidx;
+ }
+ break;
+
+ case CHELSIO_T6:
+ switch (nports) {
+ case 2: return 1 << (2 * pidx);
+ }
+ break;
+ }
+
+ dev_err(adapter->pdev_dev, "Need MPS Buffer Group Map for Chip %0x, Nports %d\n",
+ chip_version, nports);
+
+ return 0;
+}
+
+/**
+ * t4_get_mps_bg_map - return the buffer groups associated with a port
+ * @adapter: the adapter
+ * @pidx: the port index
*
* Returns a bitmap indicating which MPS buffer groups are associated
- * with the given port. Bit i is set if buffer group i is used by the
- * port.
+ * with the given Port. Bit i is set if buffer group i is used by the
+ * Port.
*/
-unsigned int t4_get_mps_bg_map(struct adapter *adap, int idx)
+unsigned int t4_get_mps_bg_map(struct adapter *adapter, int pidx)
{
- u32 n = NUMPORTS_G(t4_read_reg(adap, MPS_CMN_CTL_A));
+ u8 *mps_bg_map;
+ unsigned int nports;
+
+ nports = 1 << NUMPORTS_G(t4_read_reg(adapter, MPS_CMN_CTL_A));
+ if (pidx >= nports) {
+ CH_WARN(adapter, "MPS Port Index %d >= Nports %d\n",
+ pidx, nports);
+ return 0;
+ }
+
+ /* If we've already retrieved/computed this, just return the result.
+ */
+ mps_bg_map = adapter->params.mps_bg_map;
+ if (mps_bg_map[pidx])
+ return mps_bg_map[pidx];
+
+ /* Newer Firmware can tell us what the MPS Buffer Group Map is.
+ * If we're talking to such Firmware, let it tell us. If the new
+ * API isn't supported, fall back to the old hardcoded way. The value
+ * obtained from Firmware is encoded in below format:
+ *
+ * val = (( MPSBGMAP[Port 3] << 24 ) |
+ * ( MPSBGMAP[Port 2] << 16 ) |
+ * ( MPSBGMAP[Port 1] << 8 ) |
+ * ( MPSBGMAP[Port 0] << 0 ))
+ */
+ if (adapter->flags & FW_OK) {
+ u32 param, val;
+ int ret;
+
+ param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
+ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_MPSBGMAP));
+ ret = t4_query_params_ns(adapter, adapter->mbox, adapter->pf,
+ 0, 1, &param, &val);
+ if (!ret) {
+ int p;
+
+ /* Store the BG Map for all of the Ports in order to
+ * avoid more calls to the Firmware in the future.
+ */
+ for (p = 0; p < MAX_NPORTS; p++, val >>= 8)
+ mps_bg_map[p] = val & 0xff;
+
+ return mps_bg_map[pidx];
+ }
+ }
- if (n == 0)
- return idx == 0 ? 0xf : 0;
- /* In T6 (which is a 2 port card),
- * port 0 is mapped to channel 0 and port 1 is mapped to channel 1.
- * For 2 port T4/T5 adapter,
- * port 0 is mapped to channel 0 and 1,
- * port 1 is mapped to channel 2 and 3.
+ /* Either we're not talking to the Firmware or we're dealing with
+ * older Firmware which doesn't support the new API to get the MPS
+ * Buffer Group Map. Fall back to computing it ourselves.
*/
- if ((n == 1) &&
- (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5))
- return idx < 2 ? (3 << (2 * idx)) : 0;
- return 1 << idx;
+ mps_bg_map[pidx] = compute_mps_bg_map(adapter, pidx);
+ return mps_bg_map[pidx];
+}
+
+/**
+ * t4_get_tp_ch_map - return TP ingress channels associated with a port
+ * @adap: the adapter
+ * @pidx: the port index
+ *
+ * Returns a bitmap indicating which TP Ingress Channels are associated
+ * with a given Port. Bit i is set if TP Ingress Channel i is used by
+ * the Port.
+ */
+unsigned int t4_get_tp_ch_map(struct adapter *adap, int pidx)
+{
+ unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip);
+ unsigned int nports = 1 << NUMPORTS_G(t4_read_reg(adap, MPS_CMN_CTL_A));
+
+ if (pidx >= nports) {
+ dev_warn(adap->pdev_dev, "TP Port Index %d >= Nports %d\n",
+ pidx, nports);
+ return 0;
+ }
+
+ switch (chip_version) {
+ case CHELSIO_T4:
+ case CHELSIO_T5:
+ /* Note that these happen to be the same values as the MPS
+ * Buffer Group Map for these Chips. But we replicate the code
+ * here because they're really separate concepts.
+ */
+ switch (nports) {
+ case 1: return 0xf;
+ case 2: return 3 << (2 * pidx);
+ case 4: return 1 << pidx;
+ }
+ break;
+
+ case CHELSIO_T6:
+ switch (nports) {
+ case 2: return 1 << pidx;
+ }
+ break;
+ }
+
+ dev_err(adap->pdev_dev, "Need TP Channel Map for Chip %0x, Nports %d\n",
+ chip_version, nports);
+ return 0;
}
/**
@@ -6293,13 +6435,18 @@ int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
if (!t4_fw_matches_chip(adap, fw_hdr))
return -EINVAL;
+ /* Disable FW_OK flag so that mbox commands with FW_OK flag set
+ * won't be sent while we are flashing the FW.
+ */
+ adap->flags &= ~FW_OK;
+
ret = t4_fw_halt(adap, mbox, force);
if (ret < 0 && !force)
- return ret;
+ goto out;
ret = t4_load_fw(adap, fw_data, size);
if (ret < 0)
- return ret;
+ goto out;
/*
* Older versions of the firmware don't understand the new
@@ -6310,7 +6457,17 @@ int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
* its header flags to see if it advertises the capability.
*/
reset = ((be32_to_cpu(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
- return t4_fw_restart(adap, mbox, reset);
+ ret = t4_fw_restart(adap, mbox, reset);
+
+ /* Grab potentially new Firmware Device Log parameters so we can see
+ * how healthy the new Firmware is. It's okay to contact the new
+ * Firmware for these parameters even though, as far as it's
+ * concerned, we've never said "HELLO" to it ...
+ */
+ (void)t4_init_devlog_params(adap);
+out:
+ adap->flags |= FW_OK;
+ return ret;
}
/**
@@ -6546,13 +6703,14 @@ int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
* @params: the parameter names
* @val: the parameter values
* @rw: Write and read flag
+ * @sleep_ok: if true, we may sleep awaiting mbox cmd completion
*
* Reads the value of FW or device parameters. Up to 7 parameters can be
* queried at once.
*/
int t4_query_params_rw(struct adapter *adap, unsigned int mbox, unsigned int pf,
unsigned int vf, unsigned int nparams, const u32 *params,
- u32 *val, int rw)
+ u32 *val, int rw, bool sleep_ok)
{
int i, ret;
struct fw_params_cmd c;
@@ -6575,7 +6733,7 @@ int t4_query_params_rw(struct adapter *adap, unsigned int mbox, unsigned int pf,
p++;
}
- ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
+ ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
if (ret == 0)
for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
*val++ = be32_to_cpu(*p);
@@ -6586,7 +6744,16 @@ int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
unsigned int vf, unsigned int nparams, const u32 *params,
u32 *val)
{
- return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0);
+ return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0,
+ true);
+}
+
+int t4_query_params_ns(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int nparams, const u32 *params,
+ u32 *val)
+{
+ return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0,
+ false);
}
/**
@@ -7360,11 +7527,41 @@ void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
lc->fc = fc;
lc->supported = be16_to_cpu(p->u.info.pcap);
lc->lp_advertising = be16_to_cpu(p->u.info.lpacap);
+
t4_os_link_changed(adap, pi->port_id, link_ok);
}
}
/**
+ * t4_update_port_info - retrieve and update port information if changed
+ * @pi: the port_info
+ *
+ * We issue a Get Port Information Command to the Firmware and, if
+ * successful, we check to see if anything is different from what we
+ * last recorded and update things accordingly.
+ */
+int t4_update_port_info(struct port_info *pi)
+{
+ struct fw_port_cmd port_cmd;
+ int ret;
+
+ memset(&port_cmd, 0, sizeof(port_cmd));
+ port_cmd.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_READ_F |
+ FW_PORT_CMD_PORTID_V(pi->port_id));
+ port_cmd.action_to_len16 = cpu_to_be32(
+ FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_GET_PORT_INFO) |
+ FW_LEN16(port_cmd));
+ ret = t4_wr_mbox(pi->adapter, pi->adapter->mbox,
+ &port_cmd, sizeof(port_cmd), &port_cmd);
+ if (ret)
+ return ret;
+
+ t4_handle_get_port_info(pi, (__be64 *)&port_cmd);
+ return 0;
+}
+
+/**
* t4_handle_fw_rpl - process a FW reply message
* @adap: the adapter
* @rpl: start of the FW message
@@ -7643,10 +7840,9 @@ int t4_shutdown_adapter(struct adapter *adapter)
t4_intr_disable(adapter);
t4_write_reg(adapter, DBG_GPIO_EN_A, 0);
for_each_port(adapter, port) {
- u32 a_port_cfg = PORT_REG(port,
- is_t4(adapter->params.chip)
- ? XGMAC_PORT_CFG_A
- : MAC_PORT_CFG_A);
+ u32 a_port_cfg = is_t4(adapter->params.chip) ?
+ PORT_REG(port, XGMAC_PORT_CFG_A) :
+ T5_PORT_REG(port, MAC_PORT_CFG_A);
t4_write_reg(adapter, a_port_cfg,
t4_read_reg(adapter, a_port_cfg)
@@ -8272,7 +8468,16 @@ int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr)
ret = t4_cim_read(adap, UP_UP_DBG_LA_DATA_A, 1, &la_buf[i]);
if (ret)
break;
- idx = (idx + 1) & UPDBGLARDPTR_M;
+
+ /* Bits 0-3 of UpDbgLaRdPtr can range from 0000 to 1001 and
+ * identify the 32-bit portion of the full 312-bit data
+ */
+ if (is_t6(adap->params.chip) && (idx & 0xf) >= 9)
+ idx = (idx & 0xff0) + 0x10;
+ else
+ idx++;
+ /* address can't exceed 0xfff */
+ idx &= UPDBGLARDPTR_M;
}
restart:
if (cfg & UPDBGLAEN_F) {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index 8098c93cd16e..b0ff78da8aa2 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -92,6 +92,7 @@ enum {
CPL_RDMA_TERMINATE = 0xA2,
CPL_RDMA_WRITE = 0xA4,
CPL_SGE_EGR_UPDATE = 0xA5,
+ CPL_RX_MPS_PKT = 0xAF,
CPL_TRACE_PKT = 0xB0,
CPL_ISCSI_DATA = 0xB2,
@@ -807,6 +808,10 @@ struct cpl_tx_pkt {
#define TXPKT_INS_OVLAN_V(x) ((x) << TXPKT_INS_OVLAN_S)
#define TXPKT_INS_OVLAN_F TXPKT_INS_OVLAN_V(1U)
+#define TXPKT_TSTAMP_S 23
+#define TXPKT_TSTAMP_V(x) ((x) << TXPKT_TSTAMP_S)
+#define TXPKT_TSTAMP_F TXPKT_TSTAMP_V(1ULL)
+
#define TXPKT_OPCODE_S 24
#define TXPKT_OPCODE_V(x) ((x) << TXPKT_OPCODE_S)
@@ -1875,4 +1880,27 @@ struct cpl_rx_phys_dsgl {
(((x) >> CPL_RX_PHYS_DSGL_NOOFSGENTR_S) & \
CPL_RX_PHYS_DSGL_NOOFSGENTR_M)
+struct cpl_rx_mps_pkt {
+ __be32 op_to_r1_hi;
+ __be32 r1_lo_length;
+};
+
+#define CPL_RX_MPS_PKT_OP_S 24
+#define CPL_RX_MPS_PKT_OP_M 0xff
+#define CPL_RX_MPS_PKT_OP_V(x) ((x) << CPL_RX_MPS_PKT_OP_S)
+#define CPL_RX_MPS_PKT_OP_G(x) \
+ (((x) >> CPL_RX_MPS_PKT_OP_S) & CPL_RX_MPS_PKT_OP_M)
+
+#define CPL_RX_MPS_PKT_TYPE_S 20
+#define CPL_RX_MPS_PKT_TYPE_M 0xf
+#define CPL_RX_MPS_PKT_TYPE_V(x) ((x) << CPL_RX_MPS_PKT_TYPE_S)
+#define CPL_RX_MPS_PKT_TYPE_G(x) \
+ (((x) >> CPL_RX_MPS_PKT_TYPE_S) & CPL_RX_MPS_PKT_TYPE_M)
+
+enum {
+ X_CPL_RX_MPS_PKT_TYPE_PAUSE = 1 << 0,
+ X_CPL_RX_MPS_PKT_TYPE_PPP = 1 << 1,
+ X_CPL_RX_MPS_PKT_TYPE_QFC = 1 << 2,
+ X_CPL_RX_MPS_PKT_TYPE_PTP = 1 << 3
+};
#endif /* __T4_MSG_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
index a323185507ec..99987d8e437e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
@@ -172,6 +172,8 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
CH_PCI_ID_TABLE_FENTRY(0x509e), /* Custom T520-CR */
CH_PCI_ID_TABLE_FENTRY(0x509f), /* Custom T540-CR */
CH_PCI_ID_TABLE_FENTRY(0x50a0), /* Custom T540-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x50a1), /* Custom T540-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x50a2), /* Custom T540-KR4 */
/* T6 adapters:
*/
@@ -190,6 +192,9 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
CH_PCI_ID_TABLE_FENTRY(0x6015),
CH_PCI_ID_TABLE_FENTRY(0x6080),
CH_PCI_ID_TABLE_FENTRY(0x6081),
+ CH_PCI_ID_TABLE_FENTRY(0x6082), /* Custom T6225-CR SFP28 */
+ CH_PCI_ID_TABLE_FENTRY(0x6083), /* Custom T62100-CR QSFP28 */
+ CH_PCI_ID_TABLE_FENTRY(0x6084), /* Custom T64100-CR QSFP28 */
CH_PCI_DEVICE_ID_TABLE_DEFINE_END;
#endif /* __T4_PCI_ID_TBL_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
index 3348d33c36fa..dac90837842b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
@@ -1077,6 +1077,10 @@
#define TIEQINPARERRINT_V(x) ((x) << TIEQINPARERRINT_S)
#define TIEQINPARERRINT_F TIEQINPARERRINT_V(1U)
+#define TIMER0INT_S 2
+#define TIMER0INT_V(x) ((x) << TIMER0INT_S)
+#define TIMER0INT_F TIMER0INT_V(1U)
+
#define PREFDROPINT_S 1
#define PREFDROPINT_V(x) ((x) << PREFDROPINT_S)
#define PREFDROPINT_F PREFDROPINT_V(1U)
@@ -1795,6 +1799,8 @@
#define MPS_PORT_STAT_RX_PORT_LESS_64B_H 0x614
#define MAC_PORT_MAGIC_MACID_LO 0x824
#define MAC_PORT_MAGIC_MACID_HI 0x828
+#define MAC_PORT_TX_TS_VAL_LO 0x928
+#define MAC_PORT_TX_TS_VAL_HI 0x92c
#define MAC_PORT_EPIO_DATA0_A 0x8c0
#define MAC_PORT_EPIO_DATA1_A 0x8c4
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index 251a35e9795c..0ebed64d62d3 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -103,6 +103,7 @@ enum fw_wr_opcodes {
FW_RI_FR_NSMR_TPTE_WR = 0x20,
FW_RI_INV_LSTAG_WR = 0x1a,
FW_ISCSI_TX_DATA_WR = 0x45,
+ FW_PTP_TX_PKT_WR = 0x46,
FW_CRYPTO_LOOKASIDE_WR = 0X6d,
FW_LASTC2E_WR = 0x70
};
@@ -685,6 +686,7 @@ enum fw_cmd_opcodes {
FW_SCHED_CMD = 0x24,
FW_DEVLOG_CMD = 0x25,
FW_CLIP_CMD = 0x28,
+ FW_PTP_CMD = 0x3e,
FW_LASTC2E_CMD = 0x40,
FW_ERROR_CMD = 0x80,
FW_DEBUG_CMD = 0x81,
@@ -1123,6 +1125,7 @@ enum fw_params_param_dev {
FW_PARAMS_PARAM_DEV_ULPTX_MEMWRITE_DSGL = 0x17,
FW_PARAMS_PARAM_DEV_FWCACHE = 0x18,
FW_PARAMS_PARAM_DEV_RI_FR_NSMR_TPTE_WR = 0x1C,
+ FW_PARAMS_PARAM_DEV_MPSBGMAP = 0x1E,
};
/*
@@ -2572,6 +2575,7 @@ enum fw_port_type {
FW_PORT_TYPE_CR_QSFP,
FW_PORT_TYPE_CR2_QSFP,
FW_PORT_TYPE_SFP28,
+ FW_PORT_TYPE_KR_SFP28,
FW_PORT_TYPE_NONE = FW_PORT_CMD_PTYPE_M
};
@@ -2800,6 +2804,54 @@ struct fw_port_lb_stats_cmd {
} u;
};
+enum fw_ptp_subop {
+ /* none */
+ FW_PTP_SC_INIT_TIMER = 0x00,
+ FW_PTP_SC_TX_TYPE = 0x01,
+ /* init */
+ FW_PTP_SC_RXTIME_STAMP = 0x08,
+ FW_PTP_SC_RDRX_TYPE = 0x09,
+ /* ts */
+ FW_PTP_SC_ADJ_FREQ = 0x10,
+ FW_PTP_SC_ADJ_TIME = 0x11,
+ FW_PTP_SC_ADJ_FTIME = 0x12,
+ FW_PTP_SC_WALL_CLOCK = 0x13,
+ FW_PTP_SC_GET_TIME = 0x14,
+ FW_PTP_SC_SET_TIME = 0x15,
+};
+
+struct fw_ptp_cmd {
+ __be32 op_to_portid;
+ __be32 retval_len16;
+ union fw_ptp {
+ struct fw_ptp_sc {
+ __u8 sc;
+ __u8 r3[7];
+ } scmd;
+ struct fw_ptp_init {
+ __u8 sc;
+ __u8 txchan;
+ __be16 absid;
+ __be16 mode;
+ __be16 r3;
+ } init;
+ struct fw_ptp_ts {
+ __u8 sc;
+ __u8 sign;
+ __be16 r3;
+ __be32 ppb;
+ __be64 tm;
+ } ts;
+ } u;
+ __be64 r3;
+};
+
+#define FW_PTP_CMD_PORTID_S 0
+#define FW_PTP_CMD_PORTID_M 0xf
+#define FW_PTP_CMD_PORTID_V(x) ((x) << FW_PTP_CMD_PORTID_S)
+#define FW_PTP_CMD_PORTID_G(x) \
+ (((x) >> FW_PTP_CMD_PORTID_S) & FW_PTP_CMD_PORTID_M)
+
struct fw_rss_ind_tbl_cmd {
__be32 op_to_viid;
__be32 retval_len16;
@@ -3087,6 +3139,10 @@ struct fw_debug_cmd {
#define FW_DEBUG_CMD_TYPE_G(x) \
(((x) >> FW_DEBUG_CMD_TYPE_S) & FW_DEBUG_CMD_TYPE_M)
+enum pcie_fw_eval {
+ PCIE_FW_EVAL_CRASH = 0,
+};
+
#define PCIE_FW_ERR_S 31
#define PCIE_FW_ERR_V(x) ((x) << PCIE_FW_ERR_S)
#define PCIE_FW_ERR_F PCIE_FW_ERR_V(1U)
diff --git a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.h b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.h
index 515b94ff9080..4b5aacc09cab 100644
--- a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.h
+++ b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.h
@@ -90,7 +90,7 @@ cxgb_mk_tid_release(struct sk_buff *skb, u32 len, u32 tid, u16 chan)
{
struct cpl_tid_release *req;
- req = (struct cpl_tid_release *)__skb_put(skb, len);
+ req = __skb_put(skb, len);
memset(req, 0, len);
INIT_TP_WR(req, tid);
@@ -104,7 +104,7 @@ cxgb_mk_close_con_req(struct sk_buff *skb, u32 len, u32 tid, u16 chan,
{
struct cpl_close_con_req *req;
- req = (struct cpl_close_con_req *)__skb_put(skb, len);
+ req = __skb_put(skb, len);
memset(req, 0, len);
INIT_TP_WR(req, tid);
@@ -119,7 +119,7 @@ cxgb_mk_abort_req(struct sk_buff *skb, u32 len, u32 tid, u16 chan,
{
struct cpl_abort_req *req;
- req = (struct cpl_abort_req *)__skb_put(skb, len);
+ req = __skb_put(skb, len);
memset(req, 0, len);
INIT_TP_WR(req, tid);
@@ -134,7 +134,7 @@ cxgb_mk_abort_rpl(struct sk_buff *skb, u32 len, u32 tid, u16 chan)
{
struct cpl_abort_rpl *rpl;
- rpl = (struct cpl_abort_rpl *)__skb_put(skb, len);
+ rpl = __skb_put(skb, len);
memset(rpl, 0, len);
INIT_TP_WR(rpl, tid);
@@ -149,7 +149,7 @@ cxgb_mk_rx_data_ack(struct sk_buff *skb, u32 len, u32 tid, u16 chan,
{
struct cpl_rx_data_ack *req;
- req = (struct cpl_rx_data_ack *)__skb_put(skb, len);
+ req = __skb_put(skb, len);
memset(req, 0, len);
INIT_TP_WR(req, tid);
diff --git a/drivers/net/ethernet/cirrus/cs89x0.c b/drivers/net/ethernet/cirrus/cs89x0.c
index da5b58b853e2..410a0a95130b 100644
--- a/drivers/net/ethernet/cirrus/cs89x0.c
+++ b/drivers/net/ethernet/cirrus/cs89x0.c
@@ -450,11 +450,10 @@ skip_this_frame:
if (bp + length > lp->end_dma_buff) {
int semi_cnt = lp->end_dma_buff - bp;
- memcpy(skb_put(skb, semi_cnt), bp, semi_cnt);
- memcpy(skb_put(skb, length - semi_cnt), lp->dma_buff,
- length - semi_cnt);
+ skb_put_data(skb, bp, semi_cnt);
+ skb_put_data(skb, lp->dma_buff, length - semi_cnt);
} else {
- memcpy(skb_put(skb, length), bp, length);
+ skb_put_data(skb, bp, length);
}
bp += (length + 3) & ~3;
if (bp >= lp->end_dma_buff)
diff --git a/drivers/net/ethernet/cirrus/ep93xx_eth.c b/drivers/net/ethernet/cirrus/ep93xx_eth.c
index 7a7c02f1f8b9..e2a702996db4 100644
--- a/drivers/net/ethernet/cirrus/ep93xx_eth.c
+++ b/drivers/net/ethernet/cirrus/ep93xx_eth.c
@@ -702,7 +702,10 @@ static int ep93xx_get_link_ksettings(struct net_device *dev,
struct ethtool_link_ksettings *cmd)
{
struct ep93xx_priv *ep = netdev_priv(dev);
- return mii_ethtool_get_link_ksettings(&ep->mii, cmd);
+
+ mii_ethtool_get_link_ksettings(&ep->mii, cmd);
+
+ return 0;
}
static int ep93xx_set_link_ksettings(struct net_device *dev,
diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h
index 2b23f46b34d3..ba032ac9ae86 100644
--- a/drivers/net/ethernet/cisco/enic/enic.h
+++ b/drivers/net/ethernet/cisco/enic/enic.h
@@ -33,7 +33,7 @@
#define DRV_NAME "enic"
#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver"
-#define DRV_VERSION "2.3.0.31"
+#define DRV_VERSION "2.3.0.42"
#define DRV_COPYRIGHT "Copyright 2008-2013 Cisco Systems, Inc"
#define ENIC_BARS_MAX 6
@@ -47,7 +47,7 @@
struct enic_msix_entry {
int requested;
- char devname[IFNAMSIZ];
+ char devname[IFNAMSIZ + 8];
irqreturn_t (*isr)(int, void *);
void *devid;
cpumask_var_t affinity_mask;
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 4b87beeabce1..d24ee1ad3be1 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -1537,13 +1537,12 @@ static int enic_poll(struct napi_struct *napi, int budget)
*/
enic_calc_int_moderation(enic, &enic->rq[0]);
- if (rq_work_done < rq_work_to_do) {
+ if ((rq_work_done < budget) && napi_complete_done(napi, rq_work_done)) {
/* Some work done, but not enough to stay in polling,
* exit polling
*/
- napi_complete_done(napi, rq_work_done);
if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
enic_set_int_moderation(enic, &enic->rq[0]);
vnic_intr_unmask(&enic->intr[intr]);
@@ -1663,13 +1662,12 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget)
*/
enic_calc_int_moderation(enic, &enic->rq[rq]);
- if (work_done < work_to_do) {
+ if ((work_done < budget) && napi_complete_done(napi, work_done)) {
/* Some work done, but not enough to stay in polling,
* exit polling
*/
- napi_complete_done(napi, work_done);
if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
enic_set_int_moderation(enic, &enic->rq[rq]);
vnic_intr_unmask(&enic->intr[intr]);
@@ -1739,7 +1737,7 @@ static int enic_request_intr(struct enic *enic)
intr = enic_msix_rq_intr(enic, i);
snprintf(enic->msix[intr].devname,
sizeof(enic->msix[intr].devname),
- "%.11s-rx-%u", netdev->name, i);
+ "%s-rx-%u", netdev->name, i);
enic->msix[intr].isr = enic_isr_msix;
enic->msix[intr].devid = &enic->napi[i];
}
@@ -1750,7 +1748,7 @@ static int enic_request_intr(struct enic *enic)
intr = enic_msix_wq_intr(enic, i);
snprintf(enic->msix[intr].devname,
sizeof(enic->msix[intr].devname),
- "%.11s-tx-%u", netdev->name, i);
+ "%s-tx-%u", netdev->name, i);
enic->msix[intr].isr = enic_isr_msix;
enic->msix[intr].devid = &enic->napi[wq];
}
@@ -1758,14 +1756,14 @@ static int enic_request_intr(struct enic *enic)
intr = enic_msix_err_intr(enic);
snprintf(enic->msix[intr].devname,
sizeof(enic->msix[intr].devname),
- "%.11s-err", netdev->name);
+ "%s-err", netdev->name);
enic->msix[intr].isr = enic_isr_msix_err;
enic->msix[intr].devid = enic;
intr = enic_msix_notify_intr(enic);
snprintf(enic->msix[intr].devname,
sizeof(enic->msix[intr].devname),
- "%.11s-notify", netdev->name);
+ "%s-notify", netdev->name);
enic->msix[intr].isr = enic_isr_msix_notify;
enic->msix[intr].devid = enic;
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index 008dc8161775..16fe776ddbe5 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -1171,7 +1171,7 @@ dm9000_rx(struct net_device *dev)
if (GoodPacket &&
((skb = netdev_alloc_skb(dev, RxLen + 4)) != NULL)) {
skb_reserve(skb, 2);
- rdptr = (u8 *) skb_put(skb, RxLen - 4);
+ rdptr = skb_put(skb, RxLen - 4);
/* Read received packet from RX SRAM */
diff --git a/drivers/net/ethernet/dec/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c
index 91b8f6f5a765..c87b8cc42963 100644
--- a/drivers/net/ethernet/dec/tulip/de2104x.c
+++ b/drivers/net/ethernet/dec/tulip/de2104x.c
@@ -1483,8 +1483,8 @@ static void __de_get_regs(struct de_private *de, u8 *buf)
de_rx_missed(de, rbuf[8]);
}
-static int __de_get_link_ksettings(struct de_private *de,
- struct ethtool_link_ksettings *cmd)
+static void __de_get_link_ksettings(struct de_private *de,
+ struct ethtool_link_ksettings *cmd)
{
ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
de->media_supported);
@@ -1517,8 +1517,6 @@ static int __de_get_link_ksettings(struct de_private *de,
cmd->base.autoneg = AUTONEG_ENABLE;
/* ignore maxtxpkt, maxrxpkt for now */
-
- return 0;
}
static int __de_set_link_ksettings(struct de_private *de,
@@ -1615,13 +1613,12 @@ static int de_get_link_ksettings(struct net_device *dev,
struct ethtool_link_ksettings *cmd)
{
struct de_private *de = netdev_priv(dev);
- int rc;
spin_lock_irq(&de->lock);
- rc = __de_get_link_ksettings(de, cmd);
+ __de_get_link_ksettings(de, cmd);
spin_unlock_irq(&de->lock);
- return rc;
+ return 0;
}
static int de_set_link_ksettings(struct net_device *dev,
diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
index fd6bcf024729..47be5018d35d 100644
--- a/drivers/net/ethernet/dec/tulip/de4x5.c
+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
@@ -3624,10 +3624,10 @@ de4x5_alloc_rx_buff(struct net_device *dev, int index, int len)
skb_reserve(p, 2); /* Align */
if (index < lp->rx_old) { /* Wrapped buffer */
short tlen = (lp->rxRingSize - lp->rx_old) * RX_BUFF_SZ;
- memcpy(skb_put(p,tlen),lp->rx_bufs + lp->rx_old * RX_BUFF_SZ,tlen);
- memcpy(skb_put(p,len-tlen),lp->rx_bufs,len-tlen);
+ skb_put_data(p, lp->rx_bufs + lp->rx_old * RX_BUFF_SZ, tlen);
+ skb_put_data(p, lp->rx_bufs, len - tlen);
} else { /* Linear buffer */
- memcpy(skb_put(p,len),lp->rx_bufs + lp->rx_old * RX_BUFF_SZ,len);
+ skb_put_data(p, lp->rx_bufs + lp->rx_old * RX_BUFF_SZ, len);
}
return p;
diff --git a/drivers/net/ethernet/dec/tulip/interrupt.c b/drivers/net/ethernet/dec/tulip/interrupt.c
index ba6ae24acf62..8df80880ecaa 100644
--- a/drivers/net/ethernet/dec/tulip/interrupt.c
+++ b/drivers/net/ethernet/dec/tulip/interrupt.c
@@ -218,9 +218,9 @@ int tulip_poll(struct napi_struct *napi, int budget)
pkt_len);
skb_put(skb, pkt_len);
#else
- memcpy(skb_put(skb, pkt_len),
- tp->rx_buffers[entry].skb->data,
- pkt_len);
+ skb_put_data(skb,
+ tp->rx_buffers[entry].skb->data,
+ pkt_len);
#endif
pci_dma_sync_single_for_device(tp->pdev,
tp->rx_buffers[entry].mapping,
@@ -444,9 +444,9 @@ static int tulip_rx(struct net_device *dev)
pkt_len);
skb_put(skb, pkt_len);
#else
- memcpy(skb_put(skb, pkt_len),
- tp->rx_buffers[entry].skb->data,
- pkt_len);
+ skb_put_data(skb,
+ tp->rx_buffers[entry].skb->data,
+ pkt_len);
#endif
pci_dma_sync_single_for_device(tp->pdev,
tp->rx_buffers[entry].mapping,
diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c
index 8d98b259d1ba..7fc248efc4ba 100644
--- a/drivers/net/ethernet/dec/tulip/uli526x.c
+++ b/drivers/net/ethernet/dec/tulip/uli526x.c
@@ -864,9 +864,9 @@ static void uli526x_rx_packet(struct net_device *dev, struct uli526x_board_info
skb = new_skb;
/* size less than COPY_SIZE, allocate a rxlen SKB */
skb_reserve(skb, 2); /* 16byte align */
- memcpy(skb_put(skb, rxlen),
- skb_tail_pointer(rxptr->rx_skb_ptr),
- rxlen);
+ skb_put_data(skb,
+ skb_tail_pointer(rxptr->rx_skb_ptr),
+ rxlen);
uli526x_reuse_skb(db, rxptr->rx_skb_ptr);
} else
skb_put(skb, rxlen);
diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
index d1f2f3cc7cfa..32d7229544fa 100644
--- a/drivers/net/ethernet/dec/tulip/winbond-840.c
+++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
@@ -1395,13 +1395,12 @@ static int netdev_get_link_ksettings(struct net_device *dev,
struct ethtool_link_ksettings *cmd)
{
struct netdev_private *np = netdev_priv(dev);
- int rc;
spin_lock_irq(&np->lock);
- rc = mii_ethtool_get_link_ksettings(&np->mii_if, cmd);
+ mii_ethtool_get_link_ksettings(&np->mii_if, cmd);
spin_unlock_irq(&np->lock);
- return rc;
+ return 0;
}
static int netdev_set_link_ksettings(struct net_device *dev,
diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c
index 3e77dd863175..5a847941c46b 100644
--- a/drivers/net/ethernet/dnet.c
+++ b/drivers/net/ethernet/dnet.c
@@ -399,7 +399,7 @@ static int dnet_poll(struct napi_struct *napi, int budget)
* 'skb_put()' points to the start of sk_buff
* data area.
*/
- data_ptr = (unsigned int *)skb_put(skb, pkt_len);
+ data_ptr = skb_put(skb, pkt_len);
for (i = 0; i < (pkt_len + 3) >> 2; i++)
*data_ptr++ = dnet_readl(bp, RX_DATA_FIFO);
skb->protocol = eth_type_trans(skb, dev);
diff --git a/drivers/net/ethernet/ec_bhf.c b/drivers/net/ethernet/ec_bhf.c
index 278f139f2a22..4ee042c034a1 100644
--- a/drivers/net/ethernet/ec_bhf.c
+++ b/drivers/net/ethernet/ec_bhf.c
@@ -223,7 +223,7 @@ static void ec_bhf_process_rx(struct ec_bhf_priv *priv)
skb = netdev_alloc_skb_ip_align(priv->net_dev, pkt_size);
if (skb) {
- memcpy(skb_put(skb, pkt_size), data, pkt_size);
+ skb_put_data(skb, data, pkt_size);
skb->protocol = eth_type_trans(skb, priv->net_dev);
priv->stat_rx_bytes += pkt_size;
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 50566243e6fa..674cf9d13b98 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -37,7 +37,7 @@
#include "be_hw.h"
#include "be_roce.h"
-#define DRV_VER "11.1.0.0"
+#define DRV_VER "11.4.0.0"
#define DRV_NAME "be2net"
#define BE_NAME "Emulex BladeEngine2"
#define BE3_NAME "Emulex BladeEngine3"
diff --git a/drivers/net/ethernet/emulex/benet/be_hw.h b/drivers/net/ethernet/emulex/benet/be_hw.h
index 36e4232ed6b8..c967f45705d9 100644
--- a/drivers/net/ethernet/emulex/benet/be_hw.h
+++ b/drivers/net/ethernet/emulex/benet/be_hw.h
@@ -49,6 +49,9 @@
#define POST_STAGE_BE_RESET 0x3 /* Host wants to reset chip */
#define POST_STAGE_ARMFW_RDY 0xc000 /* FW is done with POST */
#define POST_STAGE_RECOVERABLE_ERR 0xE000 /* Recoverable err detected */
+/* FW has detected a UE and is dumping FAT log data */
+#define POST_STAGE_FAT_LOG_START 0x0D00
+#define POST_STAGE_ARMFW_UE 0xF000 /* FW has asserted a UE */
/* Lancer SLIPORT registers */
#define SLIPORT_STATUS_OFFSET 0x404
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 4eee18ce9be4..319eee36649b 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -3241,8 +3241,9 @@ void be_detect_error(struct be_adapter *adapter)
{
u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
- u32 i;
struct device *dev = &adapter->pdev->dev;
+ u16 val;
+ u32 i;
if (be_check_error(adapter, BE_ERROR_HW))
return;
@@ -3280,15 +3281,25 @@ void be_detect_error(struct be_adapter *adapter)
ue_lo = (ue_lo & ~ue_lo_mask);
ue_hi = (ue_hi & ~ue_hi_mask);
- /* On certain platforms BE hardware can indicate spurious UEs.
- * Allow HW to stop working completely in case of a real UE.
- * Hence not setting the hw_error for UE detection.
- */
-
if (ue_lo || ue_hi) {
+ /* On certain platforms BE3 hardware can indicate
+ * spurious UEs. In case of a genuine UE in the chip,
+ * the POST register correctly reports either a
+ * FAT_LOG_START state (FW is currently dumping
+ * FAT log data) or an ARMFW_UE state. Check for the
+ * above states to ascertain whether the UE is valid.
+ */
+ if (BE3_chip(adapter)) {
+ val = be_POST_stage_get(adapter);
+ if ((val & POST_STAGE_FAT_LOG_START)
+ != POST_STAGE_FAT_LOG_START &&
+ (val & POST_STAGE_ARMFW_UE)
+ != POST_STAGE_ARMFW_UE)
+ return;
+ }
+
dev_err(dev, "Error detected in the adapter");
- if (skyhawk_chip(adapter))
- be_set_error(adapter, BE_ERROR_UE);
+ be_set_error(adapter, BE_ERROR_UE);
for (i = 0; ue_lo; ue_lo >>= 1, i++) {
if (ue_lo & 1)
diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
index 1536356e2ea8..66928a922824 100644
--- a/drivers/net/ethernet/faraday/ftmac100.c
+++ b/drivers/net/ethernet/faraday/ftmac100.c
@@ -829,7 +829,10 @@ static int ftmac100_get_link_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings *cmd)
{
struct ftmac100 *priv = netdev_priv(netdev);
- return mii_ethtool_get_link_ksettings(&priv->mii, cmd);
+
+ mii_ethtool_get_link_ksettings(&priv->mii, cmd);
+
+ return 0;
}
static int ftmac100_set_link_ksettings(struct net_device *netdev,
diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
index 766636a7c25e..e92859dab7ae 100644
--- a/drivers/net/ethernet/fealnx.c
+++ b/drivers/net/ethernet/fealnx.c
@@ -1711,8 +1711,8 @@ static int netdev_rx(struct net_device *dev)
np->cur_rx->skbuff->data, pkt_len);
skb_put(skb, pkt_len);
#else
- memcpy(skb_put(skb, pkt_len),
- np->cur_rx->skbuff->data, pkt_len);
+ skb_put_data(skb, np->cur_rx->skbuff->data,
+ pkt_len);
#endif
pci_dma_sync_single_for_device(np->pci_dev,
np->cur_rx->buffer,
@@ -1821,13 +1821,12 @@ static int netdev_get_link_ksettings(struct net_device *dev,
struct ethtool_link_ksettings *cmd)
{
struct netdev_private *np = netdev_priv(dev);
- int rc;
spin_lock_irq(&np->lock);
- rc = mii_ethtool_get_link_ksettings(&np->mii, cmd);
+ mii_ethtool_get_link_ksettings(&np->mii, cmd);
spin_unlock_irq(&np->lock);
- return rc;
+ return 0;
}
static int netdev_set_link_ksettings(struct net_device *dev,
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index 290ad0563320..757b873735a5 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -342,8 +342,8 @@ static void dpaa_get_stats64(struct net_device *net_dev,
}
}
-static int dpaa_setup_tc(struct net_device *net_dev, u32 handle, __be16 proto,
- struct tc_to_netdev *tc)
+static int dpaa_setup_tc(struct net_device *net_dev, u32 handle,
+ u32 chain_index, __be16 proto, struct tc_to_netdev *tc)
{
struct dpaa_priv *priv = netdev_priv(net_dev);
u8 num_tc;
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
index 15571e251fb9..aad825088357 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
@@ -75,16 +75,14 @@ static char dpaa_stats_global[][ETH_GSTRING_LEN] = {
static int dpaa_get_link_ksettings(struct net_device *net_dev,
struct ethtool_link_ksettings *cmd)
{
- int err;
-
if (!net_dev->phydev) {
netdev_dbg(net_dev, "phy device not initialized\n");
return 0;
}
- err = phy_ethtool_ksettings_get(net_dev->phydev, cmd);
+ phy_ethtool_ksettings_get(net_dev->phydev, cmd);
- return err;
+ return 0;
}
static int dpaa_set_link_ksettings(struct net_device *net_dev,
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 5ea740b4cf14..38c7b21e5d63 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -446,6 +446,10 @@ struct bufdesc_ex {
#define FEC_QUIRK_HAS_COALESCE (1 << 13)
/* Interrupt doesn't wake CPU from deep idle */
#define FEC_QUIRK_ERR006687 (1 << 14)
+/* The MIB counters should be cleared and enabled during
+ * initialisation.
+ */
+#define FEC_QUIRK_MIB_CLEAR (1 << 15)
struct bufdesc_prop {
int qid;
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index f7c8649fd28f..a6e323f15637 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -89,10 +89,10 @@ static struct platform_device_id fec_devtype[] = {
.driver_data = 0,
}, {
.name = "imx25-fec",
- .driver_data = FEC_QUIRK_USE_GASKET,
+ .driver_data = FEC_QUIRK_USE_GASKET | FEC_QUIRK_MIB_CLEAR,
}, {
.name = "imx27-fec",
- .driver_data = 0,
+ .driver_data = FEC_QUIRK_MIB_CLEAR,
}, {
.name = "imx28-fec",
.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME |
@@ -184,6 +184,9 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
#define FEC_RACC_SHIFT16 BIT(7)
#define FEC_RACC_OPTIONS (FEC_RACC_IPDIS | FEC_RACC_PRODIS)
+/* MIB Control Register */
+#define FEC_MIB_CTRLSTAT_DISABLE BIT(31)
+
/*
* The 5270/5271/5280/5282/532x RX control register also contains maximum frame
* size bits. Other FEC hardware does not, so we need to take that into
@@ -2356,11 +2359,30 @@ static int fec_enet_get_sset_count(struct net_device *dev, int sset)
}
}
+static void fec_enet_clear_ethtool_stats(struct net_device *dev)
+{
+ struct fec_enet_private *fep = netdev_priv(dev);
+ int i;
+
+ /* Disable MIB statistics counters */
+ writel(FEC_MIB_CTRLSTAT_DISABLE, fep->hwp + FEC_MIB_CTRLSTAT);
+
+ for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
+ writel(0, fep->hwp + fec_stats[i].offset);
+
+ /* Write 0 to re-enable the MIB statistics counters */
+ writel(0, fep->hwp + FEC_MIB_CTRLSTAT);
+}
+
#else /* !defined(CONFIG_M5272) */
#define FEC_STATS_SIZE 0
static inline void fec_enet_update_ethtool_stats(struct net_device *dev)
{
}
+
+static inline void fec_enet_clear_ethtool_stats(struct net_device *dev)
+{
+}
#endif /* !defined(CONFIG_M5272) */
/* ITR clock source is enet system clock (clk_ahb).
@@ -3182,7 +3204,10 @@ static int fec_enet_init(struct net_device *ndev)
fec_restart(ndev);
- fec_enet_update_ethtool_stats(ndev);
+ if (fep->quirks & FEC_QUIRK_MIB_CLEAR)
+ fec_enet_clear_ethtool_stats(ndev);
+ else
+ fec_enet_update_ethtool_stats(ndev);
return 0;
}
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 0ff166ec3e7e..c4b4b0a1bbf0 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -1718,7 +1718,7 @@ static int gfar_restore(struct device *dev)
return 0;
}
-static struct dev_pm_ops gfar_pm_ops = {
+static const struct dev_pm_ops gfar_pm_ops = {
.suspend = gfar_suspend,
.resume = gfar_resume,
.freeze = gfar_suspend,
@@ -2250,7 +2250,7 @@ static int gfar_enet_open(struct net_device *dev)
static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
{
- struct txfcb *fcb = (struct txfcb *)skb_push(skb, GMAC_FCB_LEN);
+ struct txfcb *fcb = skb_push(skb, GMAC_FCB_LEN);
memset(fcb, 0, GMAC_FCB_LEN);
diff --git a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
index b642990b549c..4df282ed22c7 100644
--- a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
+++ b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
@@ -113,7 +113,9 @@ uec_get_ksettings(struct net_device *netdev, struct ethtool_link_ksettings *cmd)
if (!phydev)
return -ENODEV;
- return phy_ethtool_ksettings_get(phydev, cmd);
+ phy_ethtool_ksettings_get(phydev, cmd);
+
+ return 0;
}
static int
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h
index 04211ac73b36..7ba653af19cb 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.h
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.h
@@ -360,6 +360,7 @@ enum hnae_loop {
MAC_INTERNALLOOP_MAC = 0,
MAC_INTERNALLOOP_SERDES,
MAC_INTERNALLOOP_PHY,
+ MAC_LOOP_PHY_NONE,
MAC_LOOP_NONE,
};
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index e95795b3c841..a8db27e86a11 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -150,7 +150,7 @@ static int hns_nic_get_link_ksettings(struct net_device *net_dev,
cmd->base.duplex = duplex;
if (net_dev->phydev)
- (void)phy_ethtool_ksettings_get(net_dev->phydev, cmd);
+ phy_ethtool_ksettings_get(net_dev->phydev, cmd);
link_stat = hns_nic_get_link(net_dev);
if (!link_stat) {
@@ -259,67 +259,27 @@ static const char hns_nic_test_strs[][ETH_GSTRING_LEN] = {
static int hns_nic_config_phy_loopback(struct phy_device *phy_dev, u8 en)
{
-#define COPPER_CONTROL_REG 0
-#define PHY_POWER_DOWN BIT(11)
-#define PHY_LOOP_BACK BIT(14)
- u16 val = 0;
-
- if (phy_dev->is_c45) /* c45 branch adding for XGE PHY */
- return -ENOTSUPP;
+ int err;
if (en) {
- /* speed : 1000M */
- phy_write(phy_dev, HNS_PHY_PAGE_REG, 2);
- phy_write(phy_dev, 21, 0x1046);
-
- phy_write(phy_dev, HNS_PHY_PAGE_REG, 0);
- /* Force Master */
- phy_write(phy_dev, 9, 0x1F00);
-
- /* Soft-reset */
- phy_write(phy_dev, 0, 0x9140);
- /* If autoneg disabled,two soft-reset operations */
- phy_write(phy_dev, 0, 0x9140);
-
- phy_write(phy_dev, HNS_PHY_PAGE_REG, 0xFA);
-
- /* Default is 0x0400 */
- phy_write(phy_dev, 1, 0x418);
-
- /* Force 1000M Link, Default is 0x0200 */
- phy_write(phy_dev, 7, 0x20C);
-
- /* Powerup Fiber */
- phy_write(phy_dev, HNS_PHY_PAGE_REG, 1);
- val = phy_read(phy_dev, COPPER_CONTROL_REG);
- val &= ~PHY_POWER_DOWN;
- phy_write(phy_dev, COPPER_CONTROL_REG, val);
-
- /* Enable Phy Loopback */
- phy_write(phy_dev, HNS_PHY_PAGE_REG, 0);
- val = phy_read(phy_dev, COPPER_CONTROL_REG);
- val |= PHY_LOOP_BACK;
- val &= ~PHY_POWER_DOWN;
- phy_write(phy_dev, COPPER_CONTROL_REG, val);
+ /* The phy loopback test runs with the interface offline, so
+ * the phy must be resumed first to power up the device.
+ */
+ err = phy_resume(phy_dev);
+ if (err)
+ goto out;
+
+ err = phy_loopback(phy_dev, true);
} else {
- phy_write(phy_dev, HNS_PHY_PAGE_REG, 0xFA);
- phy_write(phy_dev, 1, 0x400);
- phy_write(phy_dev, 7, 0x200);
-
- phy_write(phy_dev, HNS_PHY_PAGE_REG, 1);
- val = phy_read(phy_dev, COPPER_CONTROL_REG);
- val |= PHY_POWER_DOWN;
- phy_write(phy_dev, COPPER_CONTROL_REG, val);
-
- phy_write(phy_dev, HNS_PHY_PAGE_REG, 0);
- phy_write(phy_dev, 9, 0xF00);
-
- val = phy_read(phy_dev, COPPER_CONTROL_REG);
- val &= ~PHY_LOOP_BACK;
- val |= PHY_POWER_DOWN;
- phy_write(phy_dev, COPPER_CONTROL_REG, val);
+ err = phy_loopback(phy_dev, false);
+ if (err)
+ goto out;
+
+ err = phy_suspend(phy_dev);
}
- return 0;
+
+out:
+ return err;
}
static int __lb_setup(struct net_device *ndev,
@@ -332,10 +292,9 @@ static int __lb_setup(struct net_device *ndev,
switch (loop) {
case MAC_INTERNALLOOP_PHY:
- if ((phy_dev) && (!phy_dev->is_c45)) {
- ret = hns_nic_config_phy_loopback(phy_dev, 0x1);
- ret |= h->dev->ops->set_loopback(h, loop, 0x1);
- }
+ ret = hns_nic_config_phy_loopback(phy_dev, 0x1);
+ if (!ret)
+ ret = h->dev->ops->set_loopback(h, loop, 0x1);
break;
case MAC_INTERNALLOOP_MAC:
if ((h->dev->ops->set_loopback) &&
@@ -346,17 +305,17 @@ static int __lb_setup(struct net_device *ndev,
if (h->dev->ops->set_loopback)
ret = h->dev->ops->set_loopback(h, loop, 0x1);
break;
+ case MAC_LOOP_PHY_NONE:
+ ret = hns_nic_config_phy_loopback(phy_dev, 0x0);
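+ /* fall through */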
case MAC_LOOP_NONE:
- if ((phy_dev) && (!phy_dev->is_c45))
- ret |= hns_nic_config_phy_loopback(phy_dev, 0x0);
-
- if (h->dev->ops->set_loopback) {
+ if (!ret && h->dev->ops->set_loopback) {
if (priv->ae_handle->phy_if != PHY_INTERFACE_MODE_XGMII)
- ret |= h->dev->ops->set_loopback(h,
+ ret = h->dev->ops->set_loopback(h,
MAC_INTERNALLOOP_MAC, 0x0);
- ret |= h->dev->ops->set_loopback(h,
- MAC_INTERNALLOOP_SERDES, 0x0);
+ if (!ret)
+ ret = h->dev->ops->set_loopback(h,
+ MAC_INTERNALLOOP_SERDES, 0x0);
}
break;
default:
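
The new MAC_LOOP_PHY_NONE case is meant to fall through into MAC_LOOP_NONE, so undoing PHY loopback is followed by the common MAC/SerDes teardown:

	/* Intended control flow (fallthrough annotation added here for
	 * clarity; the hunk above carries none):
	 *
	 *   case MAC_LOOP_PHY_NONE:
	 *           ret = hns_nic_config_phy_loopback(phy_dev, 0x0);
	 *           (fall through)
	 *   case MAC_LOOP_NONE:
	 *           common MAC/SerDes teardown, gated on !ret
	 */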
@@ -582,13 +541,16 @@ static int __lb_run_test(struct net_device *ndev,
return ret_val;
}
-static int __lb_down(struct net_device *ndev)
+static int __lb_down(struct net_device *ndev, enum hnae_loop loop)
{
struct hns_nic_priv *priv = netdev_priv(ndev);
struct hnae_handle *h = priv->ae_handle;
int ret;
- ret = __lb_setup(ndev, MAC_LOOP_NONE);
+ if (loop == MAC_INTERNALLOOP_PHY)
+ ret = __lb_setup(ndev, MAC_LOOP_PHY_NONE);
+ else
+ ret = __lb_setup(ndev, MAC_LOOP_NONE);
if (ret)
netdev_err(ndev, "%s: __lb_setup return error(%d)!\n",
__func__,
@@ -644,7 +606,8 @@ static void hns_nic_self_test(struct net_device *ndev,
if (!data[test_index]) {
data[test_index] = __lb_run_test(
ndev, (enum hnae_loop)st_param[i][0]);
- (void)__lb_down(ndev);
+ (void)__lb_down(ndev,
+ (enum hnae_loop)st_param[i][0]);
}
if (data[test_index])
diff --git a/drivers/net/ethernet/hp/hp100.c b/drivers/net/ethernet/hp/hp100.c
index 5673b071e39d..c6164a98f257 100644
--- a/drivers/net/ethernet/hp/hp100.c
+++ b/drivers/net/ethernet/hp/hp100.c
@@ -1281,7 +1281,7 @@ static int hp100_build_rx_pdl(hp100_ring_t * ringptr,
*/
skb_reserve(ringptr->skb, 2);
- ringptr->skb->data = (u_char *) skb_put(ringptr->skb, MAX_ETHER_SIZE);
+ ringptr->skb->data = skb_put(ringptr->skb, MAX_ETHER_SIZE);
/* ringptr->pdl points to the beginning of the PDL, i.e. the PDH */
/* Note: 1st Fragment is used for the 4 byte packet status
diff --git a/drivers/net/ethernet/i825xx/82596.c b/drivers/net/ethernet/i825xx/82596.c
index 945883842533..d719668a6684 100644
--- a/drivers/net/ethernet/i825xx/82596.c
+++ b/drivers/net/ethernet/i825xx/82596.c
@@ -809,7 +809,8 @@ memory_squeeze:
if (!rx_in_place) {
/* 16 byte align the data fields */
skb_reserve(skb, 2);
- memcpy(skb_put(skb,pkt_len), rbd->v_data, pkt_len);
+ skb_put_data(skb, rbd->v_data,
+ pkt_len);
}
skb->protocol=eth_type_trans(skb,dev);
skb->len = pkt_len;
diff --git a/drivers/net/ethernet/i825xx/lib82596.c b/drivers/net/ethernet/i825xx/lib82596.c
index e86773325cbe..8449c58f01fd 100644
--- a/drivers/net/ethernet/i825xx/lib82596.c
+++ b/drivers/net/ethernet/i825xx/lib82596.c
@@ -727,7 +727,8 @@ memory_squeeze:
dma_sync_single_for_cpu(dev->dev.parent,
(dma_addr_t)SWAP32(rbd->b_data),
PKT_BUF_SZ, DMA_FROM_DEVICE);
- memcpy(skb_put(skb, pkt_len), rbd->v_data, pkt_len);
+ skb_put_data(skb, rbd->v_data,
+ pkt_len);
dma_sync_single_for_device(dev->dev.parent,
(dma_addr_t)SWAP32(rbd->b_data),
PKT_BUF_SZ, DMA_FROM_DEVICE);
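
skb_put_data(skb, data, len) is defined as exactly the memcpy(skb_put(skb, len), data, len) idiom it replaces throughout this series; the conversion pattern:

	/* Before: grow the skb, then copy into the new tail area */
	memcpy(skb_put(skb, pkt_len), rbd->v_data, pkt_len);

	/* After: one call that grows the skb and copies the data */
	skb_put_data(skb, rbd->v_data, pkt_len);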
diff --git a/drivers/net/ethernet/ibm/emac/phy.c b/drivers/net/ethernet/ibm/emac/phy.c
index 5b88cc690c22..35865d05fccd 100644
--- a/drivers/net/ethernet/ibm/emac/phy.c
+++ b/drivers/net/ethernet/ibm/emac/phy.c
@@ -276,7 +276,7 @@ static int genmii_read_link(struct mii_phy *phy)
}
/* Generic implementation for most 10/100/1000 PHYs */
-static struct mii_phy_ops generic_phy_ops = {
+static const struct mii_phy_ops generic_phy_ops = {
.setup_aneg = genmii_setup_aneg,
.setup_forced = genmii_setup_forced,
.poll_link = genmii_poll_link,
@@ -340,7 +340,7 @@ static int cis8201_init(struct mii_phy *phy)
return 0;
}
-static struct mii_phy_ops cis8201_phy_ops = {
+static const struct mii_phy_ops cis8201_phy_ops = {
.init = cis8201_init,
.setup_aneg = genmii_setup_aneg,
.setup_forced = genmii_setup_forced,
@@ -420,7 +420,7 @@ static int et1011c_init(struct mii_phy *phy)
return 0;
}
-static struct mii_phy_ops et1011c_phy_ops = {
+static const struct mii_phy_ops et1011c_phy_ops = {
.init = et1011c_init,
.setup_aneg = genmii_setup_aneg,
.setup_forced = genmii_setup_forced,
@@ -439,7 +439,7 @@ static struct mii_phy_def et1011c_phy_def = {
-static struct mii_phy_ops m88e1111_phy_ops = {
+static const struct mii_phy_ops m88e1111_phy_ops = {
.init = m88e1111_init,
.setup_aneg = genmii_setup_aneg,
.setup_forced = genmii_setup_forced,
@@ -455,7 +455,7 @@ static struct mii_phy_def m88e1111_phy_def = {
.ops = &m88e1111_phy_ops,
};
-static struct mii_phy_ops m88e1112_phy_ops = {
+static const struct mii_phy_ops m88e1112_phy_ops = {
.init = m88e1112_init,
.setup_aneg = genmii_setup_aneg,
.setup_forced = genmii_setup_forced,
@@ -480,7 +480,7 @@ static int ar8035_init(struct mii_phy *phy)
return 0;
}
-static struct mii_phy_ops ar8035_phy_ops = {
+static const struct mii_phy_ops ar8035_phy_ops = {
.init = ar8035_init,
.setup_aneg = genmii_setup_aneg,
.setup_forced = genmii_setup_forced,
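
Marking these ops tables const moves them into read-only memory. The pattern, sketched with an illustrative table name:

	/* A const function-pointer table lands in .rodata, so a stray
	 * write faults instead of silently redirecting a callback.
	 * (example_phy_ops is an illustrative name.)
	 */
	static const struct mii_phy_ops example_phy_ops = {
		.setup_aneg   = genmii_setup_aneg,
		.setup_forced = genmii_setup_forced,
		.poll_link    = genmii_poll_link,
	};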
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 72ab7b6bf20b..3e0a695537e2 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -46,6 +46,8 @@
#include <asm/vio.h>
#include <asm/iommu.h>
#include <asm/firmware.h>
+#include <net/tcp.h>
+#include <net/ip6_checksum.h>
#include "ibmveth.h"
@@ -808,8 +810,7 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data)
ret = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr);
- if (ret == H_SUCCESS && !(ret_attr & IBMVETH_ILLAN_ACTIVE_TRUNK) &&
- !(ret_attr & IBMVETH_ILLAN_TRUNK_PRI_MASK) &&
+ if (ret == H_SUCCESS &&
(ret_attr & IBMVETH_ILLAN_PADDED_PKT_CSUM)) {
ret4 = h_illan_attributes(adapter->vdev->unit_address, clr_attr,
set_attr, &ret_attr);
@@ -1040,6 +1041,15 @@ static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
dma_addr_t dma_addr;
unsigned long mss = 0;
+ /* veth doesn't handle frag_list, so linearize the skb.
+ * When GRO is enabled, skbs can have a frag_list.
+ */
+ if (adapter->is_active_trunk &&
+ skb_has_frag_list(skb) && __skb_linearize(skb)) {
+ netdev->stats.tx_dropped++;
+ goto out;
+ }
+
/*
* veth handles a maximum of 6 segments including the header, so
* we have to linearize the skb if there are more than this.
@@ -1064,9 +1074,6 @@ static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
desc_flags = IBMVETH_BUF_VALID;
- if (skb_is_gso(skb) && adapter->fw_large_send_support)
- desc_flags |= IBMVETH_BUF_LRG_SND;
-
if (skb->ip_summed == CHECKSUM_PARTIAL) {
unsigned char *buf = skb_transport_header(skb) +
skb->csum_offset;
@@ -1076,6 +1083,9 @@ static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
/* Need to zero out the checksum */
buf[0] = 0;
buf[1] = 0;
+
+ if (skb_is_gso(skb) && adapter->fw_large_send_support)
+ desc_flags |= IBMVETH_BUF_LRG_SND;
}
retry_bounce:
@@ -1128,7 +1138,7 @@ retry_bounce:
descs[i+1].fields.address = dma_addr;
}
- if (skb_is_gso(skb)) {
+ if (skb->ip_summed == CHECKSUM_PARTIAL && skb_is_gso(skb)) {
if (adapter->fw_large_send_support) {
mss = (unsigned long)skb_shinfo(skb)->gso_size;
adapter->tx_large_packets++;
@@ -1232,6 +1242,71 @@ static void ibmveth_rx_mss_helper(struct sk_buff *skb, u16 mss, int lrg_pkt)
}
}
+static void ibmveth_rx_csum_helper(struct sk_buff *skb,
+ struct ibmveth_adapter *adapter)
+{
+ struct iphdr *iph = NULL;
+ struct ipv6hdr *iph6 = NULL;
+ __be16 skb_proto = 0;
+ u16 iphlen = 0;
+ u16 iph_proto = 0;
+ u16 tcphdrlen = 0;
+
+ skb_proto = be16_to_cpu(skb->protocol);
+
+ if (skb_proto == ETH_P_IP) {
+ iph = (struct iphdr *)skb->data;
+
+ /* If the IP checksum is not offloaded and if the packet
+ * is large send, the checksum must be rebuilt.
+ */
+ if (iph->check == 0xffff) {
+ iph->check = 0;
+ iph->check = ip_fast_csum((unsigned char *)iph,
+ iph->ihl);
+ }
+
+ iphlen = iph->ihl * 4;
+ iph_proto = iph->protocol;
+ } else if (skb_proto == ETH_P_IPV6) {
+ iph6 = (struct ipv6hdr *)skb->data;
+ iphlen = sizeof(struct ipv6hdr);
+ iph_proto = iph6->nexthdr;
+ }
+
+ /* In an OVS environment, when a flow is not yet cached (typically
+ * for a new TCP connection), the first packet is passed up to user
+ * space so that a flow can be found. During this process, OVS
+ * computes a checksum on the first packet when the CHECKSUM_PARTIAL
+ * flag is set.
+ *
+ * Given that we zeroed out the TCP checksum field on the transmit
+ * path (see the ibmveth_start_xmit routine) when setting the
+ * "no checksum" bit, the checksum OVS computes will be incorrect
+ * without the TCP pseudo-header checksum in the packet. This leads
+ * to OVS dropping the packet, and TCP retransmissions are seen.
+ *
+ * So, re-compute the TCP pseudo-header checksum.
+ */
+ if (iph_proto == IPPROTO_TCP && adapter->is_active_trunk) {
+ struct tcphdr *tcph = (struct tcphdr *)(skb->data + iphlen);
+
+ tcphdrlen = skb->len - iphlen;
+
+ /* Recompute TCP pseudo header checksum */
+ if (skb_proto == ETH_P_IP)
+ tcph->check = ~csum_tcpudp_magic(iph->saddr,
+ iph->daddr, tcphdrlen, iph_proto, 0);
+ else if (skb_proto == ETH_P_IPV6)
+ tcph->check = ~csum_ipv6_magic(&iph6->saddr,
+ &iph6->daddr, tcphdrlen, iph_proto, 0);
+
+ /* Setup SKB fields for checksum offload */
+ skb_partial_csum_set(skb, iphlen,
+ offsetof(struct tcphdr, check));
+ skb_reset_network_header(skb);
+ }
+}
+
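
For reference, csum_tcpudp_magic() folds the IPv4 pseudo header {saddr, daddr, length, protocol} into a 16-bit sum; storing its complement in tcph->check lets a later checksum pass over the TCP segment complete to the correct value, and skb_partial_csum_set() then marks the skb CHECKSUM_PARTIAL with csum_start at the transport header and csum_offset at the check field. The IPv4 case in isolation:

	/* Seed the TCP checksum with the pseudo-header sum, as above */
	tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
					 skb->len - iphlen, IPPROTO_TCP, 0);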
static int ibmveth_poll(struct napi_struct *napi, int budget)
{
struct ibmveth_adapter *adapter =
@@ -1239,7 +1314,6 @@ static int ibmveth_poll(struct napi_struct *napi, int budget)
struct net_device *netdev = adapter->netdev;
int frames_processed = 0;
unsigned long lpar_rc;
- struct iphdr *iph;
u16 mss = 0;
restart_poll:
@@ -1297,17 +1371,7 @@ restart_poll:
if (csum_good) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
- if (be16_to_cpu(skb->protocol) == ETH_P_IP) {
- iph = (struct iphdr *)skb->data;
-
- /* If the IP checksum is not offloaded and if the packet
- * is large send, the checksum must be rebuilt.
- */
- if (iph->check == 0xffff) {
- iph->check = 0;
- iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
- }
- }
+ ibmveth_rx_csum_helper(skb, adapter);
}
if (length > netdev->mtu + ETH_HLEN) {
@@ -1626,6 +1690,13 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
netdev->hw_features |= NETIF_F_TSO;
}
+ adapter->is_active_trunk = false;
+ if (ret == H_SUCCESS && (ret_attr & IBMVETH_ILLAN_ACTIVE_TRUNK)) {
+ adapter->is_active_trunk = true;
+ netdev->hw_features |= NETIF_F_FRAGLIST;
+ netdev->features |= NETIF_F_FRAGLIST;
+ }
+
netdev->min_mtu = IBMVETH_MIN_MTU;
netdev->max_mtu = ETH_MAX_MTU;
@@ -1843,7 +1914,7 @@ static struct vio_device_id ibmveth_device_table[] = {
};
MODULE_DEVICE_TABLE(vio, ibmveth_device_table);
-static struct dev_pm_ops ibmveth_pm_ops = {
+static const struct dev_pm_ops ibmveth_pm_ops = {
.resume = ibmveth_resume
};
diff --git a/drivers/net/ethernet/ibm/ibmveth.h b/drivers/net/ethernet/ibm/ibmveth.h
index ed8780cca982..01c587fc02c7 100644
--- a/drivers/net/ethernet/ibm/ibmveth.h
+++ b/drivers/net/ethernet/ibm/ibmveth.h
@@ -156,6 +156,7 @@ struct ibmveth_adapter {
int pool_config;
int rx_csum;
int large_send;
+ bool is_active_trunk;
void *bounce_buffer;
dma_addr_t bounce_buffer_dma;
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index c0fbeb387db4..a3e694679635 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -183,6 +183,12 @@ static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
send_request_map(adapter, ltb->addr,
ltb->size, ltb->map_id);
wait_for_completion(&adapter->fw_done);
+
+ if (adapter->fw_done_rc) {
+ dev_err(dev, "Couldn't map long term buffer, rc = %d\n",
+ adapter->fw_done_rc);
+ return -1;
+ }
return 0;
}
@@ -200,6 +206,33 @@ static void free_long_term_buff(struct ibmvnic_adapter *adapter,
dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
}
+static int reset_long_term_buff(struct ibmvnic_adapter *adapter,
+ struct ibmvnic_long_term_buff *ltb)
+{
+ memset(ltb->buff, 0, ltb->size);
+
+ init_completion(&adapter->fw_done);
+ send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
+ wait_for_completion(&adapter->fw_done);
+
+ if (adapter->fw_done_rc) {
+ dev_info(&adapter->vdev->dev,
+ "Reset failed, attempting to free and reallocate buffer\n");
+ free_long_term_buff(adapter, ltb);
+ return alloc_long_term_buff(adapter, ltb, ltb->size);
+ }
+ return 0;
+}
+
+static void deactivate_rx_pools(struct ibmvnic_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
+ i++)
+ adapter->rx_pool[i].active = 0;
+}
+
static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
struct ibmvnic_rx_pool *pool)
{
@@ -217,6 +250,9 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
int index;
int i;
+ if (!pool->active)
+ return;
+
handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
be32_to_cpu(adapter->login_rsp_buf->
off_rxadd_subcrqs));
@@ -287,6 +323,15 @@ failure:
dev_kfree_skb_any(skb);
adapter->replenish_add_buff_failure++;
atomic_add(buffers_added, &pool->available);
+
+ if (lpar_rc == H_CLOSED) {
+ /* Disable buffer pool replenishment and report carrier off if
+ * queue is closed. Firmware guarantees that a signal will
+ * be sent to the driver, triggering a reset.
+ */
+ deactivate_rx_pools(adapter);
+ netif_carrier_off(adapter->netdev);
+ }
}
static void replenish_pools(struct ibmvnic_adapter *adapter)
@@ -331,6 +376,35 @@ static int init_stats_token(struct ibmvnic_adapter *adapter)
return 0;
}
+static int reset_rx_pools(struct ibmvnic_adapter *adapter)
+{
+ struct ibmvnic_rx_pool *rx_pool;
+ int rx_scrqs;
+ int i, j, rc;
+
+ rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
+ for (i = 0; i < rx_scrqs; i++) {
+ rx_pool = &adapter->rx_pool[i];
+
+ rc = reset_long_term_buff(adapter, &rx_pool->long_term_buff);
+ if (rc)
+ return rc;
+
+ for (j = 0; j < rx_pool->size; j++)
+ rx_pool->free_map[j] = j;
+
+ memset(rx_pool->rx_buff, 0,
+ rx_pool->size * sizeof(struct ibmvnic_rx_buff));
+
+ atomic_set(&rx_pool->available, 0);
+ rx_pool->next_alloc = 0;
+ rx_pool->next_free = 0;
+ rx_pool->active = 1;
+ }
+
+ return 0;
+}
+
static void release_rx_pools(struct ibmvnic_adapter *adapter)
{
struct ibmvnic_rx_pool *rx_pool;
@@ -432,6 +506,34 @@ static int init_rx_pools(struct net_device *netdev)
return 0;
}
+static int reset_tx_pools(struct ibmvnic_adapter *adapter)
+{
+ struct ibmvnic_tx_pool *tx_pool;
+ int tx_scrqs;
+ int i, j, rc;
+
+ tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
+ for (i = 0; i < tx_scrqs; i++) {
+ tx_pool = &adapter->tx_pool[i];
+
+ rc = reset_long_term_buff(adapter, &tx_pool->long_term_buff);
+ if (rc)
+ return rc;
+
+ memset(tx_pool->tx_buff, 0,
+ adapter->req_tx_entries_per_subcrq *
+ sizeof(struct ibmvnic_tx_buff));
+
+ for (j = 0; j < adapter->req_tx_entries_per_subcrq; j++)
+ tx_pool->free_map[j] = j;
+
+ tx_pool->consumer_index = 0;
+ tx_pool->producer_index = 0;
+ }
+
+ return 0;
+}
+
static void release_tx_pools(struct ibmvnic_adapter *adapter)
{
struct ibmvnic_tx_pool *tx_pool;
@@ -518,6 +620,32 @@ static void release_error_buffers(struct ibmvnic_adapter *adapter)
spin_unlock_irqrestore(&adapter->error_list_lock, flags);
}
+static void ibmvnic_napi_enable(struct ibmvnic_adapter *adapter)
+{
+ int i;
+
+ if (adapter->napi_enabled)
+ return;
+
+ for (i = 0; i < adapter->req_rx_queues; i++)
+ napi_enable(&adapter->napi[i]);
+
+ adapter->napi_enabled = true;
+}
+
+static void ibmvnic_napi_disable(struct ibmvnic_adapter *adapter)
+{
+ int i;
+
+ if (!adapter->napi_enabled)
+ return;
+
+ for (i = 0; i < adapter->req_rx_queues; i++)
+ napi_disable(&adapter->napi[i]);
+
+ adapter->napi_enabled = false;
+}
+
static int ibmvnic_login(struct net_device *netdev)
{
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
@@ -635,12 +763,6 @@ static int init_resources(struct ibmvnic_adapter *adapter)
if (rc)
return rc;
- rc = init_sub_crq_irqs(adapter);
- if (rc) {
- netdev_err(netdev, "failed to initialize sub crq irqs\n");
- return -1;
- }
-
rc = init_stats_token(adapter);
if (rc)
return rc;
@@ -674,9 +796,7 @@ static int __ibmvnic_open(struct net_device *netdev)
adapter->state = VNIC_OPENING;
replenish_pools(adapter);
-
- for (i = 0; i < adapter->req_rx_queues; i++)
- napi_enable(&adapter->napi[i]);
+ ibmvnic_napi_enable(adapter);
/* We're ready to receive frames, enable the sub-crq interrupts and
* set the logical link state to up
@@ -778,14 +898,14 @@ static int __ibmvnic_close(struct net_device *netdev)
int i;
adapter->state = VNIC_CLOSING;
- netif_tx_stop_all_queues(netdev);
- if (adapter->napi) {
- for (i = 0; i < adapter->req_rx_queues; i++)
- napi_disable(&adapter->napi[i]);
- }
+ /* ensure that transmissions are stopped if called by do_reset */
+ if (adapter->resetting)
+ netif_tx_disable(netdev);
+ else
+ netif_tx_stop_all_queues(netdev);
- clean_tx_pools(adapter);
+ ibmvnic_napi_disable(adapter);
if (adapter->tx_scrq) {
for (i = 0; i < adapter->req_tx_queues; i++)
@@ -814,6 +934,7 @@ static int __ibmvnic_close(struct net_device *netdev)
}
}
+ clean_tx_pools(adapter);
adapter->state = VNIC_CLOSED;
return rc;
}
@@ -1092,8 +1213,14 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
dev_kfree_skb_any(skb);
tx_buff->skb = NULL;
- if (lpar_rc == H_CLOSED)
- netif_stop_subqueue(netdev, queue_num);
+ if (lpar_rc == H_CLOSED) {
+ /* Disable TX and report carrier off if queue is closed.
+ * Firmware guarantees that a signal will be sent to the
+ * driver, triggering a reset or some other action.
+ */
+ netif_tx_stop_all_queues(netdev);
+ netif_carrier_off(netdev);
+ }
tx_send_failed++;
tx_dropped++;
@@ -1206,37 +1333,39 @@ static int do_reset(struct ibmvnic_adapter *adapter,
if (rc)
return rc;
- /* remove the closed state so when we call open it appears
- * we are coming from the probed state.
- */
- adapter->state = VNIC_PROBED;
+ if (adapter->reset_reason != VNIC_RESET_NON_FATAL) {
+ /* remove the closed state so when we call open it appears
+ * we are coming from the probed state.
+ */
+ adapter->state = VNIC_PROBED;
- release_resources(adapter);
- release_sub_crqs(adapter);
- release_crq_queue(adapter);
+ rc = ibmvnic_init(adapter);
+ if (rc)
+ return 0;
- rc = ibmvnic_init(adapter);
- if (rc)
- return 0;
+ /* If the adapter was in PROBE state prior to the reset,
+ * exit here.
+ */
+ if (reset_state == VNIC_PROBED)
+ return 0;
- /* If the adapter was in PROBE state prior to the reset, exit here. */
- if (reset_state == VNIC_PROBED)
- return 0;
+ rc = ibmvnic_login(netdev);
+ if (rc) {
+ adapter->state = VNIC_PROBED;
+ return 0;
+ }
- rc = ibmvnic_login(netdev);
- if (rc) {
- adapter->state = VNIC_PROBED;
- return 0;
- }
+ rc = reset_tx_pools(adapter);
+ if (rc)
+ return rc;
- rtnl_lock();
- rc = init_resources(adapter);
- rtnl_unlock();
- if (rc)
- return rc;
+ rc = reset_rx_pools(adapter);
+ if (rc)
+ return rc;
- if (reset_state == VNIC_CLOSED)
- return 0;
+ if (reset_state == VNIC_CLOSED)
+ return 0;
+ }
rc = __ibmvnic_open(netdev);
if (rc) {
@@ -1254,6 +1383,9 @@ static int do_reset(struct ibmvnic_adapter *adapter,
for (i = 0; i < adapter->req_rx_queues; i++)
napi_schedule(&adapter->napi[i]);
+ if (adapter->reset_reason != VNIC_RESET_FAILOVER)
+ netdev_notify_peers(netdev);
+
return 0;
}
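
A condensed sketch of the reset flow after this restructuring (error paths omitted):

	/*
	 *   __ibmvnic_close()
	 *   if (reset_reason != VNIC_RESET_NON_FATAL) {
	 *           ibmvnic_init()      - re-registers the existing CRQ
	 *           ibmvnic_login()
	 *           reset_tx_pools()    - remap long-term buffers in place
	 *           reset_rx_pools()
	 *   }
	 *   __ibmvnic_open()
	 *   netdev_notify_peers()       - skipped for failover resets
	 */

Compared to the old path, the sub-CRQs, queue memory, and DMA mappings are reused rather than released and reallocated on every reset.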
@@ -1313,6 +1445,7 @@ static void __ibmvnic_reset(struct work_struct *work)
if (rc) {
free_all_rwi(adapter);
+ mutex_unlock(&adapter->reset_lock);
return;
}
@@ -1333,6 +1466,12 @@ static void ibmvnic_reset(struct ibmvnic_adapter *adapter,
return;
}
+ if (adapter->state == VNIC_PROBING) {
+ netdev_warn(netdev, "Adapter reset during probe\n");
+ adapter->init_done_rc = EAGAIN;
+ return;
+ }
+
mutex_lock(&adapter->rwi_lock);
list_for_each(entry, &adapter->rwi_list) {
@@ -1383,6 +1522,7 @@ static int ibmvnic_poll(struct napi_struct *napi, int budget)
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
int scrq_num = (int)(napi - adapter->napi);
int frames_processed = 0;
+
restart_poll:
while (frames_processed < budget) {
struct sk_buff *skb;
@@ -1392,6 +1532,12 @@ restart_poll:
u16 offset;
u8 flags = 0;
+ if (unlikely(adapter->resetting)) {
+ enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
+ napi_complete_done(napi, frames_processed);
+ return frames_processed;
+ }
+
if (!pending_scrq(adapter, adapter->rx_scrq[scrq_num]))
break;
next = ibmvnic_next_scrq(adapter, adapter->rx_scrq[scrq_num]);
@@ -1441,7 +1587,9 @@ restart_poll:
netdev->stats.rx_bytes += length;
frames_processed++;
}
- replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);
+
+ if (adapter->state != VNIC_CLOSING)
+ replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);
if (frames_processed < budget) {
enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
@@ -1614,6 +1762,44 @@ static const struct ethtool_ops ibmvnic_ethtool_ops = {
/* Routines for managing CRQs/sCRQs */
+static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter,
+ struct ibmvnic_sub_crq_queue *scrq)
+{
+ int rc;
+
+ if (scrq->irq) {
+ free_irq(scrq->irq, scrq);
+ irq_dispose_mapping(scrq->irq);
+ scrq->irq = 0;
+ }
+
+ memset(scrq->msgs, 0, 4 * PAGE_SIZE);
+ scrq->cur = 0;
+
+ rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
+ 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
+ return rc;
+}
+
+static int reset_sub_crq_queues(struct ibmvnic_adapter *adapter)
+{
+ int i, rc;
+
+ for (i = 0; i < adapter->req_tx_queues; i++) {
+ rc = reset_one_sub_crq_queue(adapter, adapter->tx_scrq[i]);
+ if (rc)
+ return rc;
+ }
+
+ for (i = 0; i < adapter->req_rx_queues; i++) {
+ rc = reset_one_sub_crq_queue(adapter, adapter->rx_scrq[i]);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
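
reset_one_sub_crq_queue() keeps the 4-page queue memory and its DMA mapping (msg_token), so a reset only has to tear down the IRQ, zero the message area, and re-register the same buffer with the hypervisor:

	/* Reuse path on reset:
	 *   free_irq() / irq_dispose_mapping()
	 *   memset(scrq->msgs, 0, 4 * PAGE_SIZE); scrq->cur = 0;
	 *   h_reg_sub_crq(unit_address, same msg_token, ...)
	 * versus the full release/allocate cycle taken at probe time.
	 */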
+
static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
struct ibmvnic_sub_crq_queue *scrq)
{
@@ -2109,8 +2295,7 @@ static int pending_scrq(struct ibmvnic_adapter *adapter,
{
union sub_crq *entry = &scrq->msgs[scrq->cur];
- if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP ||
- adapter->state == VNIC_CLOSING)
+ if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP)
return 1;
else
return 0;
@@ -2748,6 +2933,8 @@ static void handle_error_indication(union ibmvnic_crq *crq,
if (crq->error_indication.flags & IBMVNIC_FATAL_ERROR)
ibmvnic_reset(adapter, VNIC_RESET_FATAL);
+ else
+ ibmvnic_reset(adapter, VNIC_RESET_NON_FATAL);
}
static void handle_change_mac_rsp(union ibmvnic_crq *crq,
@@ -2899,36 +3086,6 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
return 0;
}
-static void handle_request_map_rsp(union ibmvnic_crq *crq,
- struct ibmvnic_adapter *adapter)
-{
- struct device *dev = &adapter->vdev->dev;
- u8 map_id = crq->request_map_rsp.map_id;
- int tx_subcrqs;
- int rx_subcrqs;
- long rc;
- int i;
-
- tx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
- rx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
-
- rc = crq->request_map_rsp.rc.code;
- if (rc) {
- dev_err(dev, "Error %ld in REQUEST_MAP_RSP\n", rc);
- adapter->map_id--;
- /* need to find and zero tx/rx_pool map_id */
- for (i = 0; i < tx_subcrqs; i++) {
- if (adapter->tx_pool[i].long_term_buff.map_id == map_id)
- adapter->tx_pool[i].long_term_buff.map_id = 0;
- }
- for (i = 0; i < rx_subcrqs; i++) {
- if (adapter->rx_pool[i].long_term_buff.map_id == map_id)
- adapter->rx_pool[i].long_term_buff.map_id = 0;
- }
- }
- complete(&adapter->fw_done);
-}
-
static void handle_request_unmap_rsp(union ibmvnic_crq *crq,
struct ibmvnic_adapter *adapter)
{
@@ -3153,6 +3310,8 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
switch (gen_crq->cmd) {
case IBMVNIC_CRQ_INIT:
dev_info(dev, "Partner initialized\n");
+ adapter->from_passive_init = true;
+ complete(&adapter->init_done);
break;
case IBMVNIC_CRQ_INIT_COMPLETE:
dev_info(dev, "Partner initialization complete\n");
@@ -3207,7 +3366,8 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
handle_query_map_rsp(crq, adapter);
break;
case REQUEST_MAP_RSP:
- handle_request_map_rsp(crq, adapter);
+ adapter->fw_done_rc = crq->request_map_rsp.rc.code;
+ complete(&adapter->fw_done);
break;
case REQUEST_UNMAP_RSP:
handle_request_unmap_rsp(crq, adapter);
@@ -3461,29 +3621,61 @@ static int ibmvnic_init(struct ibmvnic_adapter *adapter)
unsigned long timeout = msecs_to_jiffies(30000);
int rc;
- rc = init_crq_queue(adapter);
+ if (adapter->resetting) {
+ rc = ibmvnic_reset_crq(adapter);
+ if (!rc)
+ rc = vio_enable_interrupts(adapter->vdev);
+ } else {
+ rc = init_crq_queue(adapter);
+ }
+
if (rc) {
dev_err(dev, "Couldn't initialize crq. rc=%d\n", rc);
return rc;
}
+ adapter->from_passive_init = false;
+
init_completion(&adapter->init_done);
+ adapter->init_done_rc = 0;
ibmvnic_send_crq_init(adapter);
if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
dev_err(dev, "Initialization sequence timed out\n");
+ return -1;
+ }
+
+ if (adapter->init_done_rc) {
release_crq_queue(adapter);
+ return adapter->init_done_rc;
+ }
+
+ if (adapter->from_passive_init) {
+ adapter->state = VNIC_OPEN;
+ adapter->from_passive_init = false;
return -1;
}
- rc = init_sub_crqs(adapter);
+ if (adapter->resetting)
+ rc = reset_sub_crq_queues(adapter);
+ else
+ rc = init_sub_crqs(adapter);
if (rc) {
dev_err(dev, "Initialization of sub crqs failed\n");
release_crq_queue(adapter);
+ return rc;
+ }
+
+ rc = init_sub_crq_irqs(adapter);
+ if (rc) {
+ dev_err(dev, "Failed to initialize sub crq irqs\n");
+ release_crq_queue(adapter);
}
return rc;
}
+static struct device_attribute dev_attr_failover;
+
static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
{
struct ibmvnic_adapter *adapter;
@@ -3532,17 +3724,26 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
mutex_init(&adapter->rwi_lock);
adapter->resetting = false;
- rc = ibmvnic_init(adapter);
+ do {
+ rc = ibmvnic_init(adapter);
+ if (rc && rc != EAGAIN) {
+ free_netdev(netdev);
+ return rc;
+ }
+ } while (rc == EAGAIN);
+
+ netdev->mtu = adapter->req_mtu - ETH_HLEN;
+
+ rc = device_create_file(&dev->dev, &dev_attr_failover);
if (rc) {
free_netdev(netdev);
return rc;
}
- netdev->mtu = adapter->req_mtu - ETH_HLEN;
-
rc = register_netdev(netdev);
if (rc) {
dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
+ device_remove_file(&dev->dev, &dev_attr_failover);
free_netdev(netdev);
return rc;
}
@@ -3568,12 +3769,49 @@ static int ibmvnic_remove(struct vio_dev *dev)
adapter->state = VNIC_REMOVED;
mutex_unlock(&adapter->reset_lock);
+ device_remove_file(&dev->dev, &dev_attr_failover);
free_netdev(netdev);
dev_set_drvdata(&dev->dev, NULL);
return 0;
}
+static ssize_t failover_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct net_device *netdev = dev_get_drvdata(dev);
+ struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+ __be64 session_token;
+ long rc;
+
+ if (!sysfs_streq(buf, "1"))
+ return -EINVAL;
+
+ rc = plpar_hcall(H_VIOCTL, retbuf, adapter->vdev->unit_address,
+ H_GET_SESSION_TOKEN, 0, 0, 0);
+ if (rc) {
+ netdev_err(netdev, "Couldn't retrieve session token, rc %ld\n",
+ rc);
+ return -EINVAL;
+ }
+
+ session_token = (__be64)retbuf[0];
+ netdev_dbg(netdev, "Initiating client failover, session id %llx\n",
+ be64_to_cpu(session_token));
+ rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
+ H_SESSION_ERR_DETECTED, session_token, 0, 0);
+ if (rc) {
+ netdev_err(netdev, "Client initiated failover failed, rc %ld\n",
+ rc);
+ return -EINVAL;
+ }
+
+ return count;
+}
+
+static DEVICE_ATTR(failover, 0200, NULL, failover_store);
+
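
With the attribute registered, a client-initiated failover can be triggered from user space by writing 1 to the device's new failover file (under the usual vio sysfs layout, something like /sys/devices/vio/<unit-address>/failover); the store handler fetches a session token via H_GET_SESSION_TOKEN and then signals H_SESSION_ERR_DETECTED with it.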
static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
{
struct net_device *netdev = dev_get_drvdata(&vdev->dev);
@@ -3610,6 +3848,9 @@ static int ibmvnic_resume(struct device *dev)
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
int i;
+ if (adapter->state != VNIC_OPEN)
+ return 0;
+
/* kick the interrupt handlers just in case we lost an interrupt */
for (i = 0; i < adapter->req_rx_queues; i++)
ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq,
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index 4702b48cfa44..8eff6e15f4bb 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -595,7 +595,7 @@ struct ibmvnic_request_map_rsp {
u8 cmd;
u8 reserved1;
u8 map_id;
- u8 reserved2[4];
+ u8 reserved2[8];
struct ibmvnic_rc rc;
} __packed __aligned(8);
@@ -925,6 +925,7 @@ enum vnic_state {VNIC_PROBING = 1,
enum ibmvnic_reset_reason {VNIC_RESET_FAILOVER = 1,
VNIC_RESET_MOBILITY,
VNIC_RESET_FATAL,
+ VNIC_RESET_NON_FATAL,
VNIC_RESET_TIMEOUT};
struct ibmvnic_rwi {
@@ -987,6 +988,7 @@ struct ibmvnic_adapter {
spinlock_t error_list_lock;
struct completion fw_done;
+ int fw_done_rc;
/* partner capabilities */
u64 min_tx_queues;
@@ -1031,4 +1033,5 @@ struct ibmvnic_adapter {
struct list_head rwi_list;
struct work_struct ibmvnic_reset;
bool resetting;
+ bool napi_enabled, from_passive_init;
};
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index 1542a2158e96..1feb54b6d92e 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -236,12 +236,14 @@ config I40E_DCB
If unsure, say N.
config I40EVF
- tristate "Intel(R) XL710 X710 Virtual Function Ethernet support"
+ tristate "Intel(R) Ethernet Adaptive Virtual Function support"
depends on PCI_MSI
---help---
- This driver supports Intel(R) XL710 and X710 virtual functions.
- For more information on how to identify your adapter, go to the
- Adapter & Driver ID Guide that can be located at:
+ This driver supports virtual functions for Intel XL710,
+ X710, X722, and all devices advertising support for Intel
+ Ethernet Adaptive Virtual Function devices. For more
+ information on how to identify your adapter, go to the Adapter
+ & Driver ID Guide that can be located at:
<http://support.intel.com>
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index 2b7323d392dc..4d10270ddf8f 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -2430,7 +2430,10 @@ static int e100_get_link_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings *cmd)
{
struct nic *nic = netdev_priv(netdev);
- return mii_ethtool_get_link_ksettings(&nic->mii, cmd);
+
+ mii_ethtool_get_link_ksettings(&nic->mii, cmd);
+
+ return 0;
}
static int e100_set_link_ksettings(struct net_device *netdev,
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index bd8b05fe8258..98375e1e1185 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -4345,7 +4345,7 @@ static struct sk_buff *e1000_copybreak(struct e1000_adapter *adapter,
dma_sync_single_for_cpu(&adapter->pdev->dev, buffer_info->dma,
length, DMA_FROM_DEVICE);
- memcpy(skb_put(skb, length), data, length);
+ skb_put_data(skb, data, length);
return skb;
}
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index c7c994eb410e..98e68888abb1 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -268,6 +268,7 @@ struct e1000_adapter {
u32 tx_fifo_size;
u32 tx_dma_failed;
u32 tx_hwtstamp_timeouts;
+ u32 tx_hwtstamp_skipped;
/* Rx */
bool (*clean_rx)(struct e1000_ring *ring, int *work_done,
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index e23dbd9190d6..003cbd605799 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -105,6 +105,7 @@ static const struct e1000_stats e1000_gstrings_stats[] = {
E1000_STAT("uncorr_ecc_errors", uncorr_errors),
E1000_STAT("corr_ecc_errors", corr_errors),
E1000_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts),
+ E1000_STAT("tx_hwtstamp_skipped", tx_hwtstamp_skipped),
};
#define E1000_GLOBAL_STATS_LEN ARRAY_SIZE(e1000_gstrings_stats)
@@ -2072,7 +2073,7 @@ static void e1000_get_ethtool_stats(struct net_device *netdev,
pm_runtime_get_sync(netdev->dev.parent);
- e1000e_get_stats64(netdev, &net_stats);
+ dev_get_stats(netdev, &net_stats);
pm_runtime_put_sync(netdev->dev.parent);
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index b3679728caac..2dcb5463d9b8 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -1183,6 +1183,7 @@ static void e1000e_tx_hwtstamp_work(struct work_struct *work)
struct e1000_hw *hw = &adapter->hw;
if (er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID) {
+ struct sk_buff *skb = adapter->tx_hwtstamp_skb;
struct skb_shared_hwtstamps shhwtstamps;
u64 txstmp;
@@ -1191,9 +1192,14 @@ static void e1000e_tx_hwtstamp_work(struct work_struct *work)
e1000e_systim_to_hwtstamp(adapter, &shhwtstamps, txstmp);
- skb_tstamp_tx(adapter->tx_hwtstamp_skb, &shhwtstamps);
- dev_kfree_skb_any(adapter->tx_hwtstamp_skb);
+ /* Clear the global tx_hwtstamp_skb pointer and force writes
+ * prior to notifying the stack of a Tx timestamp.
+ */
adapter->tx_hwtstamp_skb = NULL;
+ wmb(); /* force write prior to skb_tstamp_tx */
+
+ skb_tstamp_tx(skb, &shhwtstamps);
+ dev_kfree_skb_any(skb);
} else if (time_after(jiffies, adapter->tx_hwtstamp_start
+ adapter->tx_timeout_factor * HZ)) {
dev_kfree_skb_any(adapter->tx_hwtstamp_skb);
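
The reordering matters because the transmit path treats tx_hwtstamp_skb as a one-deep slot: clearing the pointer, and forcing the write out, before calling skb_tstamp_tx() guarantees that a timestamp request raced against the notification finds the slot free instead of being counted as skipped. The two sides, sketched:

	/* xmit path                        timestamp worker (this change)
	 *   if (!adapter->tx_hwtstamp_skb)   skb = adapter->tx_hwtstamp_skb;
	 *           claim the slot           adapter->tx_hwtstamp_skb = NULL;
	 *   else                             wmb();
	 *           tx_hwtstamp_skipped++;   skb_tstamp_tx(skb, &shhwtstamps);
	 */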
@@ -3680,6 +3686,7 @@ static int e1000e_config_hwtstamp(struct e1000_adapter *adapter,
* Delay Request messages but not both so fall-through to
* time stamp all packets.
*/
+ case HWTSTAMP_FILTER_NTP_ALL:
case HWTSTAMP_FILTER_ALL:
is_l2 = true;
is_l4 = true;
@@ -5860,17 +5867,20 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
nr_frags);
if (count) {
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
- (adapter->flags & FLAG_HAS_HW_TIMESTAMP) &&
- !adapter->tx_hwtstamp_skb) {
- skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
- tx_flags |= E1000_TX_FLAGS_HWTSTAMP;
- adapter->tx_hwtstamp_skb = skb_get(skb);
- adapter->tx_hwtstamp_start = jiffies;
- schedule_work(&adapter->tx_hwtstamp_work);
- } else {
- skb_tx_timestamp(skb);
+ (adapter->flags & FLAG_HAS_HW_TIMESTAMP)) {
+ if (!adapter->tx_hwtstamp_skb) {
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ tx_flags |= E1000_TX_FLAGS_HWTSTAMP;
+ adapter->tx_hwtstamp_skb = skb_get(skb);
+ adapter->tx_hwtstamp_start = jiffies;
+ schedule_work(&adapter->tx_hwtstamp_work);
+ } else {
+ adapter->tx_hwtstamp_skipped++;
+ }
}
+ skb_tx_timestamp(skb);
+
netdev_sent_queue(netdev, skb->len);
e1000_tx_queue(tx_ring, tx_flags, count);
/* Make sure there is space in the ring for the next send. */
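
Note that skb_tx_timestamp() now runs unconditionally, so software transmit timestamps are still delivered when the single hardware-timestamp slot is busy; the hardware request is then only counted in the new tx_hwtstamp_skipped statistic.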
@@ -6630,12 +6640,17 @@ static int e1000e_pm_thaw(struct device *dev)
static int e1000e_pm_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
+ int rc;
e1000e_flush_lpic(pdev);
e1000e_pm_freeze(dev);
- return __e1000_shutdown(pdev, false);
+ rc = __e1000_shutdown(pdev, false);
+ if (rc)
+ e1000e_pm_thaw(dev);
+
+ return rc;
}
static int e1000e_pm_resume(struct device *dev)
@@ -6733,20 +6748,20 @@ static irqreturn_t e1000_intr_msix(int __always_unused irq, void *data)
vector = 0;
msix_irq = adapter->msix_entries[vector].vector;
- disable_irq(msix_irq);
- e1000_intr_msix_rx(msix_irq, netdev);
+ if (disable_hardirq(msix_irq))
+ e1000_intr_msix_rx(msix_irq, netdev);
enable_irq(msix_irq);
vector++;
msix_irq = adapter->msix_entries[vector].vector;
- disable_irq(msix_irq);
- e1000_intr_msix_tx(msix_irq, netdev);
+ if (disable_hardirq(msix_irq))
+ e1000_intr_msix_tx(msix_irq, netdev);
enable_irq(msix_irq);
vector++;
msix_irq = adapter->msix_entries[vector].vector;
- disable_irq(msix_irq);
- e1000_msix_other(msix_irq, netdev);
+ if (disable_hardirq(msix_irq))
+ e1000_msix_other(msix_irq, netdev);
enable_irq(msix_irq);
}
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
index 24f2f6f86f5a..5e37387c7082 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
@@ -1265,8 +1265,8 @@ err_queueing_scheme:
return err;
}
-static int __fm10k_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
- struct tc_to_netdev *tc)
+static int __fm10k_setup_tc(struct net_device *dev, u32 handle, u32 chain_index,
+ __be16 proto, struct tc_to_netdev *tc)
{
if (tc->type != TC_SETUP_MQPRIO)
return -EINVAL;
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 44d9610f7a15..d616f698e155 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
+ * Copyright(c) 2013 - 2017 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -57,7 +57,7 @@
#include "i40e_type.h"
#include "i40e_prototype.h"
#include "i40e_client.h"
-#include "i40e_virtchnl.h"
+#include <linux/avf/virtchnl.h>
#include "i40e_virtchnl_pf.h"
#include "i40e_txrx.h"
#include "i40e_dcb.h"
@@ -103,6 +103,12 @@
(I40E_AQ_PHY_DEBUG_DISABLE_LINK_FW | \
I40E_AQ_PHY_DEBUG_DISABLE_ALL_LINK_FW)
+#define I40E_OEM_EETRACK_ID 0xffffffff
+#define I40E_OEM_GEN_SHIFT 24
+#define I40E_OEM_SNAP_MASK 0x00ff0000
+#define I40E_OEM_SNAP_SHIFT 16
+#define I40E_OEM_RELEASE_MASK 0x0000ffff
+
/* The values in here are decimal coded as hex, as is the case in the NVM map */
#define I40E_CURRENT_NVM_VERSION_HI 0x2
#define I40E_CURRENT_NVM_VERSION_LO 0x40
@@ -503,10 +509,12 @@ struct i40e_pf {
struct ptp_clock *ptp_clock;
struct ptp_clock_info ptp_caps;
struct sk_buff *ptp_tx_skb;
+ unsigned long ptp_tx_start;
struct hwtstamp_config tstamp_config;
struct mutex tmreg_lock; /* Used to protect the SYSTIME registers. */
u64 ptp_base_adj;
u32 tx_hwtstamp_timeouts;
+ u32 tx_hwtstamp_skipped;
u32 rx_hwtstamp_cleared;
u32 latch_event_flags;
spinlock_t ptp_rx_lock; /* Used to protect Rx timestamp registers. */
@@ -514,9 +522,8 @@ struct i40e_pf {
bool ptp_tx;
bool ptp_rx;
u16 rss_table_size; /* HW RSS table size */
- /* These are only valid in NPAR modes */
- u32 npar_max_bw;
- u32 npar_min_bw;
+ u32 max_bw;
+ u32 min_bw;
u32 ioremap_len;
u32 fd_inv;
@@ -627,6 +634,7 @@ struct i40e_vsi {
/* These are containers of ring pointers, allocated at run-time */
struct i40e_ring **rx_rings;
struct i40e_ring **tx_rings;
+ struct i40e_ring **xdp_rings; /* XDP Tx rings */
u32 active_filters;
u32 promisc_threshold;
@@ -643,6 +651,8 @@ struct i40e_vsi {
u16 max_frame;
u16 rx_buf_len;
+ struct bpf_prog *xdp_prog;
+
/* List of q_vectors allocated to this VSI */
struct i40e_q_vector **q_vectors;
int num_q_vectors;
@@ -730,22 +740,36 @@ static inline char *i40e_nvm_version_str(struct i40e_hw *hw)
{
static char buf[32];
u32 full_ver;
- u8 ver, patch;
- u16 build;
full_ver = hw->nvm.oem_ver;
- ver = (u8)(full_ver >> I40E_OEM_VER_SHIFT);
- build = (u16)((full_ver >> I40E_OEM_VER_BUILD_SHIFT) &
- I40E_OEM_VER_BUILD_MASK);
- patch = (u8)(full_ver & I40E_OEM_VER_PATCH_MASK);
-
- snprintf(buf, sizeof(buf),
- "%x.%02x 0x%x %d.%d.%d",
- (hw->nvm.version & I40E_NVM_VERSION_HI_MASK) >>
- I40E_NVM_VERSION_HI_SHIFT,
- (hw->nvm.version & I40E_NVM_VERSION_LO_MASK) >>
- I40E_NVM_VERSION_LO_SHIFT,
- hw->nvm.eetrack, ver, build, patch);
+
+ if (hw->nvm.eetrack == I40E_OEM_EETRACK_ID) {
+ u8 gen, snap;
+ u16 release;
+
+ gen = (u8)(full_ver >> I40E_OEM_GEN_SHIFT);
+ snap = (u8)((full_ver & I40E_OEM_SNAP_MASK) >>
+ I40E_OEM_SNAP_SHIFT);
+ release = (u16)(full_ver & I40E_OEM_RELEASE_MASK);
+
+ snprintf(buf, sizeof(buf), "%x.%x.%x", gen, snap, release);
+ } else {
+ u8 ver, patch;
+ u16 build;
+
+ ver = (u8)(full_ver >> I40E_OEM_VER_SHIFT);
+ build = (u16)((full_ver >> I40E_OEM_VER_BUILD_SHIFT) &
+ I40E_OEM_VER_BUILD_MASK);
+ patch = (u8)(full_ver & I40E_OEM_VER_PATCH_MASK);
+
+ snprintf(buf, sizeof(buf),
+ "%x.%02x 0x%x %d.%d.%d",
+ (hw->nvm.version & I40E_NVM_VERSION_HI_MASK) >>
+ I40E_NVM_VERSION_HI_SHIFT,
+ (hw->nvm.version & I40E_NVM_VERSION_LO_MASK) >>
+ I40E_NVM_VERSION_LO_SHIFT,
+ hw->nvm.eetrack, ver, build, patch);
+ }
return buf;
}
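
The two output formats, shown with hypothetical values:

	/* eetrack == I40E_OEM_EETRACK_ID (OEM image):
	 *      "1.2.22"                       gen.snap.release, all hex
	 * any other eetrack (standard image):
	 *      "6.01 0x80003484 1.66.0"       nvm-version eetrack oem-ver
	 */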
@@ -956,7 +980,8 @@ bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
struct i40e_dcbx_config *old_cfg,
struct i40e_dcbx_config *new_cfg);
#endif /* CONFIG_I40E_DCB */
-void i40e_ptp_rx_hang(struct i40e_vsi *vsi);
+void i40e_ptp_rx_hang(struct i40e_pf *pf);
+void i40e_ptp_tx_hang(struct i40e_pf *pf);
void i40e_ptp_tx_hwtstamp(struct i40e_pf *pf);
void i40e_ptp_rx_hwtstamp(struct i40e_pf *pf, struct sk_buff *skb, u8 index);
void i40e_ptp_set_increment(struct i40e_pf *pf);
@@ -965,8 +990,13 @@ int i40e_ptp_get_ts_config(struct i40e_pf *pf, struct ifreq *ifr);
void i40e_ptp_init(struct i40e_pf *pf);
void i40e_ptp_stop(struct i40e_pf *pf);
int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi);
-i40e_status i40e_get_npar_bw_setting(struct i40e_pf *pf);
-i40e_status i40e_set_npar_bw_setting(struct i40e_pf *pf);
-i40e_status i40e_commit_npar_bw_setting(struct i40e_pf *pf);
+i40e_status i40e_get_partition_bw_setting(struct i40e_pf *pf);
+i40e_status i40e_set_partition_bw_setting(struct i40e_pf *pf);
+i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf);
void i40e_print_link_message(struct i40e_vsi *vsi, bool isup);
+
+static inline bool i40e_enabled_xdp_vsi(struct i40e_vsi *vsi)
+{
+ return !!vsi->xdp_prog;
+}
#endif /* _I40E_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index 5eb04114e13f..5d5f422cbae5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
+ * Copyright(c) 2013 - 2017 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -531,7 +531,7 @@ struct i40e_aqc_mac_address_read {
#define I40E_AQC_PORT_ADDR_VALID 0x40
#define I40E_AQC_WOL_ADDR_VALID 0x80
#define I40E_AQC_MC_MAG_EN_VALID 0x100
-#define I40E_AQC_ADDR_VALID_MASK 0x1F0
+#define I40E_AQC_ADDR_VALID_MASK 0x3F0
u8 reserved[6];
__le32 addr_high;
__le32 addr_low;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c
index c3b81a97558e..1b1e2acbd07f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_client.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_client.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+ * Copyright(c) 2013 - 2017 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -273,8 +273,8 @@ int i40e_vf_client_capable(struct i40e_pf *pf, u32 vf_id)
if (!cdev || !cdev->client)
goto out;
if (!cdev->client->ops || !cdev->client->ops->vf_capable) {
- dev_info(&pf->pdev->dev,
- "Cannot locate client instance VF capability routine\n");
+ dev_dbg(&pf->pdev->dev,
+ "Cannot locate client instance VF capability routine\n");
goto out;
}
if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state))
@@ -565,7 +565,7 @@ static int i40e_client_virtchnl_send(struct i40e_info *ldev,
struct i40e_hw *hw = &pf->hw;
i40e_status err;
- err = i40e_aq_send_msg_to_vf(hw, vf_id, I40E_VIRTCHNL_OP_IWARP,
+ err = i40e_aq_send_msg_to_vf(hw, vf_id, VIRTCHNL_OP_IWARP,
0, msg, len, NULL);
if (err)
dev_err(&pf->pdev->dev, "Unable to send iWarp message to VF, error %d, aq status %d\n",
@@ -595,6 +595,8 @@ static int i40e_client_setup_qvlist(struct i40e_info *ldev,
size = sizeof(struct i40e_qvlist_info) +
(sizeof(struct i40e_qv_info) * (qvlist_info->num_vectors - 1));
ldev->qvlist_info = kzalloc(size, GFP_KERNEL);
+ if (!ldev->qvlist_info)
+ return -ENOMEM;
ldev->qvlist_info->num_vectors = qvlist_info->num_vectors;
for (i = 0; i < qvlist_info->num_vectors; i++) {
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 24f020655291..8e082a946411 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -27,7 +27,7 @@
#include "i40e_type.h"
#include "i40e_adminq.h"
#include "i40e_prototype.h"
-#include "i40e_virtchnl.h"
+#include <linux/avf/virtchnl.h>
/**
* i40e_set_mac_type - Sets MAC type
@@ -3614,11 +3614,15 @@ i40e_status i40e_aq_get_cee_dcb_config(struct i40e_hw *hw,
/**
* i40e_aq_add_udp_tunnel
* @hw: pointer to the hw struct
- * @udp_port: the UDP port to add
+ * @udp_port: the UDP port to add in Host byte order
* @header_len: length of the tunneling header in DWords
* @protocol_index: protocol index type
* @filter_index: pointer to filter index
* @cmd_details: pointer to command details structure or NULL
+ *
+ * Note: Firmware expects the udp_port value to be in Little Endian format,
+ * and this function will call cpu_to_le16 to convert from Host byte order to
+ * Little Endian order.
**/
i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
u16 udp_port, u8 protocol_index,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
index 0fab3a9b51d9..55079fe3ed63 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
+ * Copyright(c) 2013 - 2017 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -390,6 +390,8 @@ static void i40e_parse_cee_app_tlv(struct i40e_cee_feat_tlv *tlv,
if (!dcbcfg->numapps)
return;
+ if (dcbcfg->numapps > I40E_DCBX_MAX_APPS)
+ dcbcfg->numapps = I40E_DCBX_MAX_APPS;
for (i = 0; i < dcbcfg->numapps; i++) {
u8 up, selector;
@@ -618,14 +620,17 @@ static void i40e_cee_to_dcb_v1_config(
/* CEE PG data to ETS config */
dcbcfg->etscfg.maxtcs = cee_cfg->oper_num_tc;
+ /* Note that the FW creates the oper_prio_tc nibbles reversed
+ * from those in the CEE Priority Group sub-TLV.
+ */
for (i = 0; i < 4; i++) {
tc = (u8)((cee_cfg->oper_prio_tc[i] &
- I40E_CEE_PGID_PRIO_1_MASK) >>
- I40E_CEE_PGID_PRIO_1_SHIFT);
- dcbcfg->etscfg.prioritytable[i*2] = tc;
- tc = (u8)((cee_cfg->oper_prio_tc[i] &
I40E_CEE_PGID_PRIO_0_MASK) >>
I40E_CEE_PGID_PRIO_0_SHIFT);
+ dcbcfg->etscfg.prioritytable[i * 2] = tc;
+ tc = (u8)((cee_cfg->oper_prio_tc[i] &
+ I40E_CEE_PGID_PRIO_1_MASK) >>
+ I40E_CEE_PGID_PRIO_1_SHIFT);
dcbcfg->etscfg.prioritytable[i*2 + 1] = tc;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 894c8e57ba00..9692a5294fa3 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -147,6 +147,7 @@ static const struct i40e_stats i40e_gstrings_stats[] = {
I40E_PF_STAT("VF_admin_queue_requests", vf_aq_requests),
I40E_PF_STAT("arq_overflows", arq_overflows),
I40E_PF_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared),
+ I40E_PF_STAT("tx_hwtstamp_skipped", tx_hwtstamp_skipped),
I40E_PF_STAT("fdir_flush_cnt", fd_flush_cnt),
I40E_PF_STAT("fdir_atr_match", stats.fd_atr_match),
I40E_PF_STAT("fdir_atr_tunnel_match", stats.fd_atr_tunnel_match),
@@ -1298,6 +1299,17 @@ static void i40e_get_ringparam(struct net_device *netdev,
ring->rx_jumbo_pending = 0;
}
+static bool i40e_active_tx_ring_index(struct i40e_vsi *vsi, u16 index)
+{
+ if (i40e_enabled_xdp_vsi(vsi)) {
+ return index < vsi->num_queue_pairs ||
+ (index >= vsi->alloc_queue_pairs &&
+ index < vsi->alloc_queue_pairs + vsi->num_queue_pairs);
+ }
+
+ return index < vsi->num_queue_pairs;
+}
+
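
The index test encodes the Tx ring layout used when XDP is enabled (inferred from the checks above):

	/* [0, num_queue_pairs)                           regular Tx rings
	 * [alloc_queue_pairs,
	 *  alloc_queue_pairs + num_queue_pairs)          XDP Tx rings
	 *
	 * Indices between the two ranges are allocated but inactive.
	 */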
static int i40e_set_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring)
{
@@ -1307,6 +1319,7 @@ static int i40e_set_ringparam(struct net_device *netdev,
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
u32 new_rx_count, new_tx_count;
+ u16 tx_alloc_queue_pairs;
int timeout = 50;
int i, err = 0;
@@ -1344,6 +1357,8 @@ static int i40e_set_ringparam(struct net_device *netdev,
for (i = 0; i < vsi->num_queue_pairs; i++) {
vsi->tx_rings[i]->count = new_tx_count;
vsi->rx_rings[i]->count = new_rx_count;
+ if (i40e_enabled_xdp_vsi(vsi))
+ vsi->xdp_rings[i]->count = new_tx_count;
}
goto done;
}
@@ -1353,20 +1368,24 @@ static int i40e_set_ringparam(struct net_device *netdev,
* to the Tx and Rx ring structs.
*/
- /* alloc updated Tx resources */
+ /* alloc updated Tx and XDP Tx resources */
+ tx_alloc_queue_pairs = vsi->alloc_queue_pairs *
+ (i40e_enabled_xdp_vsi(vsi) ? 2 : 1);
if (new_tx_count != vsi->tx_rings[0]->count) {
netdev_info(netdev,
"Changing Tx descriptor count from %d to %d.\n",
vsi->tx_rings[0]->count, new_tx_count);
- tx_rings = kcalloc(vsi->alloc_queue_pairs,
+ tx_rings = kcalloc(tx_alloc_queue_pairs,
sizeof(struct i40e_ring), GFP_KERNEL);
if (!tx_rings) {
err = -ENOMEM;
goto done;
}
- for (i = 0; i < vsi->num_queue_pairs; i++) {
- /* clone ring and setup updated count */
+ for (i = 0; i < tx_alloc_queue_pairs; i++) {
+ if (!i40e_active_tx_ring_index(vsi, i))
+ continue;
+
tx_rings[i] = *vsi->tx_rings[i];
tx_rings[i].count = new_tx_count;
/* the desc and bi pointers will be reallocated in the
@@ -1378,6 +1397,8 @@ static int i40e_set_ringparam(struct net_device *netdev,
if (err) {
while (i) {
i--;
+ if (!i40e_active_tx_ring_index(vsi, i))
+ continue;
i40e_free_tx_resources(&tx_rings[i]);
}
kfree(tx_rings);
@@ -1445,9 +1466,11 @@ rx_unwind:
i40e_down(vsi);
if (tx_rings) {
- for (i = 0; i < vsi->num_queue_pairs; i++) {
- i40e_free_tx_resources(vsi->tx_rings[i]);
- *vsi->tx_rings[i] = tx_rings[i];
+ for (i = 0; i < tx_alloc_queue_pairs; i++) {
+ if (i40e_active_tx_ring_index(vsi, i)) {
+ i40e_free_tx_resources(vsi->tx_rings[i]);
+ *vsi->tx_rings[i] = tx_rings[i];
+ }
}
kfree(tx_rings);
tx_rings = NULL;
@@ -1478,8 +1501,10 @@ rx_unwind:
free_tx:
/* error cleanup if the Rx allocations failed after getting Tx */
if (tx_rings) {
- for (i = 0; i < vsi->num_queue_pairs; i++)
- i40e_free_tx_resources(&tx_rings[i]);
+ for (i = 0; i < tx_alloc_queue_pairs; i++) {
+ if (i40e_active_tx_ring_index(vsi, i))
+ i40e_free_tx_resources(vsi->tx_rings[i]);
+ }
kfree(tx_rings);
tx_rings = NULL;
}
@@ -2686,6 +2711,12 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
u8 flow_pctype = 0;
u64 i_set, i_setc;
+ if (pf->flags & I40E_FLAG_MFP_ENABLED) {
+ dev_err(&pf->pdev->dev,
+ "Change of RSS hash input set is not supported when MFP mode is enabled\n");
+ return -EOPNOTSUPP;
+ }
+
/* RSS does not support anything other than hashing
* to queues on src and dst IPs and ports
*/
diff --git a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
index b077ef8b00fa..2d1253c5b7a1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
@@ -762,7 +762,7 @@ int i40e_fcoe_handle_offload(struct i40e_ring *rx_ring,
(fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA)) {
struct fcoe_crc_eof *crc = NULL;
- crc = (struct fcoe_crc_eof *)skb_put(skb, sizeof(*crc));
+ crc = skb_put(skb, sizeof(*crc));
crc->fcoe_eof = FC_EOF_T;
} else {
/* otherwise, drop the header only frame */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index a7a4b28b4144..2db93d3f6d23 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
+ * Copyright(c) 2013 - 2017 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -27,6 +27,7 @@
#include <linux/etherdevice.h>
#include <linux/of_net.h>
#include <linux/pci.h>
+#include <linux/bpf.h>
/* Local includes */
#include "i40e.h"
@@ -407,6 +408,27 @@ struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
}
/**
+ * i40e_get_netdev_stats_struct_tx - populate stats from a Tx ring
+ * @ring: Tx ring to get statistics from
+ * @stats: statistics entry to be updated
+ **/
+static void i40e_get_netdev_stats_struct_tx(struct i40e_ring *ring,
+ struct rtnl_link_stats64 *stats)
+{
+ u64 bytes, packets;
+ unsigned int start;
+
+ do {
+ start = u64_stats_fetch_begin_irq(&ring->syncp);
+ packets = ring->stats.packets;
+ bytes = ring->stats.bytes;
+ } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
+
+ stats->tx_packets += packets;
+ stats->tx_bytes += bytes;
+}
+
+/**
* i40e_get_netdev_stats_struct - Get statistics for netdev interface
* @netdev: network interface device structure
*
@@ -436,15 +458,8 @@ static void i40e_get_netdev_stats_struct(struct net_device *netdev,
tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
if (!tx_ring)
continue;
+ i40e_get_netdev_stats_struct_tx(tx_ring, stats);
- do {
- start = u64_stats_fetch_begin_irq(&tx_ring->syncp);
- packets = tx_ring->stats.packets;
- bytes = tx_ring->stats.bytes;
- } while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));
-
- stats->tx_packets += packets;
- stats->tx_bytes += bytes;
rx_ring = &tx_ring[1];
do {
@@ -455,6 +470,9 @@ static void i40e_get_netdev_stats_struct(struct net_device *netdev,
stats->rx_packets += packets;
stats->rx_bytes += bytes;
+
+ if (i40e_enabled_xdp_vsi(vsi))
+ i40e_get_netdev_stats_struct_tx(&rx_ring[1], stats);
}
rcu_read_unlock();
@@ -2263,9 +2281,8 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
i40e_aq_str(hw, hw->aq.asq_last_status));
}
}
- if ((changed_flags & IFF_PROMISC) ||
- (promisc_changed &&
- test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state))) {
+
+ if ((changed_flags & IFF_PROMISC) || promisc_changed) {
bool cur_promisc;
cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
@@ -2396,6 +2413,18 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf)
}
/**
+ * i40e_max_xdp_frame_size - returns the maximum allowed frame size for XDP
+ * @vsi: the vsi
+ **/
+static int i40e_max_xdp_frame_size(struct i40e_vsi *vsi)
+{
+ if (PAGE_SIZE >= 8192 || (vsi->back->flags & I40E_FLAG_LEGACY_RX))
+ return I40E_RXBUFFER_2048;
+ else
+ return I40E_RXBUFFER_3072;
+}
+
+/**
* i40e_change_mtu - NDO callback to change the Maximum Transfer Unit
* @netdev: network interface device structure
* @new_mtu: new value for maximum frame size
@@ -2408,6 +2437,13 @@ static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
+ if (i40e_enabled_xdp_vsi(vsi)) {
+ int frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+
+ if (frame_size > i40e_max_xdp_frame_size(vsi))
+ return -EINVAL;
+ }
+
netdev_info(netdev, "changing MTU from %d to %d\n",
netdev->mtu, new_mtu);
netdev->mtu = new_mtu;
@@ -2794,6 +2830,12 @@ static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi)
for (i = 0; i < vsi->num_queue_pairs && !err; i++)
err = i40e_setup_tx_descriptors(vsi->tx_rings[i]);
+ if (!i40e_enabled_xdp_vsi(vsi))
+ return err;
+
+ for (i = 0; i < vsi->num_queue_pairs && !err; i++)
+ err = i40e_setup_tx_descriptors(vsi->xdp_rings[i]);
+
return err;
}
@@ -2807,12 +2849,17 @@ static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)
{
int i;
- if (!vsi->tx_rings)
- return;
+ if (vsi->tx_rings) {
+ for (i = 0; i < vsi->num_queue_pairs; i++)
+ if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
+ i40e_free_tx_resources(vsi->tx_rings[i]);
+ }
- for (i = 0; i < vsi->num_queue_pairs; i++)
- if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
- i40e_free_tx_resources(vsi->tx_rings[i]);
+ if (vsi->xdp_rings) {
+ for (i = 0; i < vsi->num_queue_pairs; i++)
+ if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc)
+ i40e_free_tx_resources(vsi->xdp_rings[i]);
+ }
}
/**
@@ -3073,6 +3120,12 @@ static int i40e_vsi_configure_tx(struct i40e_vsi *vsi)
for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
err = i40e_configure_tx_ring(vsi->tx_rings[i]);
+ if (!i40e_enabled_xdp_vsi(vsi))
+ return err;
+
+ for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
+ err = i40e_configure_tx_ring(vsi->xdp_rings[i]);
+
return err;
}
@@ -3217,6 +3270,7 @@ static int i40e_vsi_configure(struct i40e_vsi *vsi)
**/
static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
{
+ bool has_xdp = i40e_enabled_xdp_vsi(vsi);
struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
u16 vector;
@@ -3247,28 +3301,40 @@ static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
/* Linked list for the queuepairs assigned to this vector */
wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);
for (q = 0; q < q_vector->num_ringpairs; q++) {
+ u32 nextqp = has_xdp ? qp + vsi->alloc_queue_pairs : qp;
u32 val;
val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
- (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
- (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
- (qp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)|
- (I40E_QUEUE_TYPE_TX
- << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
+ (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
+ (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
+ (nextqp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
+ (I40E_QUEUE_TYPE_TX <<
+ I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
wr32(hw, I40E_QINT_RQCTL(qp), val);
+ if (has_xdp) {
+ val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
+ (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
+ (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
+ (qp << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
+ (I40E_QUEUE_TYPE_TX <<
+ I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
+
+ wr32(hw, I40E_QINT_TQCTL(nextqp), val);
+ }
+
val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
- (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
- (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
- ((qp+1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT)|
- (I40E_QUEUE_TYPE_RX
- << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
+ (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
+ (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
+ ((qp + 1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
+ (I40E_QUEUE_TYPE_RX <<
+ I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
/* Terminate the linked list */
if (q == (q_vector->num_ringpairs - 1))
- val |= (I40E_QUEUE_END_OF_LIST
- << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
+ val |= (I40E_QUEUE_END_OF_LIST <<
+ I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
wr32(hw, I40E_QINT_TQCTL(qp), val);
qp++;
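With XDP enabled, the per-vector interrupt linked list is extended so each Rx queue chains to its XDP Tx queue (offset by alloc_queue_pairs) before the regular Tx queue, which then chains to the next Rx queue. A small sketch that prints the resulting cause chain for one vector (print_vector_chain and nqp are illustrative names; nqp stands in for vsi->alloc_queue_pairs):

#include <stdbool.h>
#include <stdio.h>

/* Rx(qp) -> XDP Tx(qp + nqp) -> Tx(qp) -> Rx(qp + 1) -> ... -> end */
static void print_vector_chain(int qp_first, int ringpairs, int nqp,
                               bool has_xdp)
{
    int q, qp = qp_first;

    for (q = 0; q < ringpairs; q++, qp++) {
        printf("Rx%d -> ", qp);
        if (has_xdp)
            printf("XdpTx%d -> ", qp + nqp);
        printf("Tx%d -> ", qp);
    }
    printf("end\n");
}

int main(void)
{
    print_vector_chain(0, 2, 4, true);    /* vector serving pairs 0,1 of 4 */
    print_vector_chain(2, 2, 4, false);   /* same, without XDP */
    return 0;
}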
@@ -3322,6 +3388,7 @@ static void i40e_enable_misc_int_causes(struct i40e_pf *pf)
**/
static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
{
+ u32 nextqp = i40e_enabled_xdp_vsi(vsi) ? vsi->alloc_queue_pairs : 0;
struct i40e_q_vector *q_vector = vsi->q_vectors[0];
struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
@@ -3342,12 +3409,22 @@ static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
wr32(hw, I40E_PFINT_LNKLST0, 0);
/* Associate the queue pair to the vector and enable the queue int */
- val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
- (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
+ val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
+ (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
+ (nextqp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
(I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
wr32(hw, I40E_QINT_RQCTL(0), val);
+ if (i40e_enabled_xdp_vsi(vsi)) {
+ val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
+ (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
+ (I40E_QUEUE_TYPE_TX
+ << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
+
+ wr32(hw, I40E_QINT_TQCTL(nextqp), val);
+ }
+
val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
(I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
(I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
@@ -3511,11 +3588,24 @@ static void i40e_vsi_disable_irq(struct i40e_vsi *vsi)
int base = vsi->base_vector;
int i;
+ /* disable interrupt causation from each queue */
for (i = 0; i < vsi->num_queue_pairs; i++) {
- wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), 0);
- wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), 0);
+ u32 val;
+
+ val = rd32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx));
+ val &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK;
+ wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), val);
+
+ val = rd32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx));
+ val &= ~I40E_QINT_RQCTL_CAUSE_ENA_MASK;
+ wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), val);
+
+ if (!i40e_enabled_xdp_vsi(vsi))
+ continue;
+ wr32(hw, I40E_QINT_TQCTL(vsi->xdp_rings[i]->reg_idx), 0);
}
+ /* disable each interrupt */
if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
for (i = vsi->base_vector;
i < (vsi->num_q_vectors + vsi->base_vector); i++)
@@ -3594,10 +3684,10 @@ static irqreturn_t i40e_intr(int irq, void *data)
pf->sw_int_count++;
if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
- (ena_mask & I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK)) {
+ (icr0 & I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK)) {
ena_mask &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
- icr0 &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
dev_dbg(&pf->pdev->dev, "cleared PE_CRITERR\n");
+ set_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
}
/* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
@@ -3816,6 +3906,16 @@ static void i40e_map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx)
q_vector->tx.ring = tx_ring;
q_vector->tx.count++;
+ /* Place XDP Tx ring in the same q_vector ring list as regular Tx */
+ if (i40e_enabled_xdp_vsi(vsi)) {
+ struct i40e_ring *xdp_ring = vsi->xdp_rings[qp_idx];
+
+ xdp_ring->q_vector = q_vector;
+ xdp_ring->next = q_vector->tx.ring;
+ q_vector->tx.ring = xdp_ring;
+ q_vector->tx.count++;
+ }
+
rx_ring->q_vector = q_vector;
rx_ring->next = q_vector->rx.ring;
q_vector->rx.ring = rx_ring;
@@ -3995,6 +4095,33 @@ static void i40e_control_tx_q(struct i40e_pf *pf, int pf_q, bool enable)
}
/**
+ * i40e_control_wait_tx_q - Start/stop Tx queue and wait for completion
+ * @seid: VSI SEID
+ * @pf: the PF structure
+ * @pf_q: the PF queue to configure
+ * @is_xdp: true if the queue is used for XDP
+ * @enable: start or stop the queue
+ **/
+static int i40e_control_wait_tx_q(int seid, struct i40e_pf *pf, int pf_q,
+ bool is_xdp, bool enable)
+{
+ int ret;
+
+ i40e_control_tx_q(pf, pf_q, enable);
+
+ /* wait for the change to finish */
+ ret = i40e_pf_txq_wait(pf, pf_q, enable);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "VSI seid %d %sTx ring %d %sable timeout\n",
+ seid, (is_xdp ? "XDP " : ""), pf_q,
+ (enable ? "en" : "dis"));
+ }
+
+ return ret;
+}
+
+/**
* i40e_vsi_control_tx - Start or stop a VSI's rings
* @vsi: the VSI being configured
* @enable: start or stop the rings
@@ -4006,16 +4133,20 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
pf_q = vsi->base_queue;
for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
- i40e_control_tx_q(pf, pf_q, enable);
+ ret = i40e_control_wait_tx_q(vsi->seid, pf,
+ pf_q,
+ false /*is xdp*/, enable);
+ if (ret)
+ break;
- /* wait for the change to finish */
- ret = i40e_pf_txq_wait(pf, pf_q, enable);
- if (ret) {
- dev_info(&pf->pdev->dev,
- "VSI seid %d Tx ring %d %sable timeout\n",
- vsi->seid, pf_q, (enable ? "en" : "dis"));
+ if (!i40e_enabled_xdp_vsi(vsi))
+ continue;
+
+ ret = i40e_control_wait_tx_q(vsi->seid, pf,
+ pf_q + vsi->alloc_queue_pairs,
+ true /*is xdp*/, enable);
+ if (ret)
break;
- }
}
return ret;
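i40e_control_wait_tx_q() folds the "kick the queue, then poll until the hardware confirms" step into one helper so the XDP and regular Tx paths share it. A generic userspace sketch of that pattern, with the register poll simulated (all names are illustrative):

#include <stdbool.h>
#include <stdio.h>

static bool hw_queue_state;    /* stands in for the QTX_ENA register */

static void control_queue(bool enable)
{
    hw_queue_state = enable;    /* the real code writes a register */
}

/* poll until the queue reaches the requested state or we give up */
static int wait_queue(bool enable, int max_polls)
{
    int i;

    for (i = 0; i < max_polls; i++)
        if (hw_queue_state == enable)
            return 0;
    return -1;    /* timeout */
}

static int control_wait_queue(int pf_q, bool is_xdp, bool enable)
{
    int ret;

    control_queue(enable);
    ret = wait_queue(enable, 10);
    if (ret)
        fprintf(stderr, "%sTx ring %d %sable timeout\n",
                is_xdp ? "XDP " : "", pf_q, enable ? "en" : "dis");
    return ret;
}

int main(void)
{
    return control_wait_queue(3, true, true);
}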
@@ -4527,7 +4658,21 @@ int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi)
vsi->seid, pf_q);
return ret;
}
- /* Check and wait for the Tx queue */
+
+ if (!i40e_enabled_xdp_vsi(vsi))
+ goto wait_rx;
+
+ /* Check and wait for the XDP Tx queue */
+ ret = i40e_pf_txq_wait(pf, pf_q + vsi->alloc_queue_pairs,
+ false);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "VSI seid %d XDP Tx ring %d disable timeout\n",
+ vsi->seid, pf_q);
+ return ret;
+ }
+wait_rx:
+ /* Check and wait for the Rx queue */
ret = i40e_pf_rxq_wait(pf, pf_q, false);
if (ret) {
dev_info(&pf->pdev->dev,
@@ -5446,6 +5591,8 @@ void i40e_down(struct i40e_vsi *vsi)
for (i = 0; i < vsi->num_queue_pairs; i++) {
i40e_clean_tx_ring(vsi->tx_rings[i]);
+ if (i40e_enabled_xdp_vsi(vsi))
+ i40e_clean_tx_ring(vsi->xdp_rings[i]);
i40e_clean_rx_ring(vsi->rx_rings[i]);
}
@@ -5509,7 +5656,8 @@ exit:
return ret;
}
-static int __i40e_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
+static int __i40e_setup_tc(struct net_device *netdev, u32 handle,
+ u32 chain_index, __be16 proto,
struct tc_to_netdev *tc)
{
if (tc->type != TC_SETUP_MQPRIO)
@@ -6372,7 +6520,8 @@ static void i40e_watchdog_subtask(struct i40e_pf *pf)
i40e_update_veb_stats(pf->veb[i]);
}
- i40e_ptp_rx_hang(pf->vsi[pf->lan_vsi]);
+ i40e_ptp_rx_hang(pf);
+ i40e_ptp_tx_hang(pf);
}
/**
@@ -6417,9 +6566,7 @@ static void i40e_reset_subtask(struct i40e_pf *pf)
if (reset_flags &&
!test_bit(__I40E_DOWN, pf->state) &&
!test_bit(__I40E_CONFIG_BUSY, pf->state)) {
- rtnl_lock();
- i40e_do_reset(pf, reset_flags, true);
- rtnl_unlock();
+ i40e_do_reset(pf, reset_flags, false);
}
}
@@ -6968,6 +7115,51 @@ static void i40e_send_version(struct i40e_pf *pf)
}
/**
+ * i40e_get_oem_version - get OEM specific version information
+ * @hw: pointer to the hardware structure
+ **/
+static void i40e_get_oem_version(struct i40e_hw *hw)
+{
+ u16 block_offset = 0xffff;
+ u16 block_length = 0;
+ u16 capabilities = 0;
+ u16 gen_snap = 0;
+ u16 release = 0;
+
+#define I40E_SR_NVM_OEM_VERSION_PTR 0x1B
+#define I40E_NVM_OEM_LENGTH_OFFSET 0x00
+#define I40E_NVM_OEM_CAPABILITIES_OFFSET 0x01
+#define I40E_NVM_OEM_GEN_OFFSET 0x02
+#define I40E_NVM_OEM_RELEASE_OFFSET 0x03
+#define I40E_NVM_OEM_CAPABILITIES_MASK 0x000F
+#define I40E_NVM_OEM_LENGTH 3
+
+ /* Check if pointer to OEM version block is valid. */
+ i40e_read_nvm_word(hw, I40E_SR_NVM_OEM_VERSION_PTR, &block_offset);
+ if (block_offset == 0xffff)
+ return;
+
+ /* Check if OEM version block has correct length. */
+ i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_LENGTH_OFFSET,
+ &block_length);
+ if (block_length < I40E_NVM_OEM_LENGTH)
+ return;
+
+ /* Check if OEM version format is as expected. */
+ i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_CAPABILITIES_OFFSET,
+ &capabilities);
+ if ((capabilities & I40E_NVM_OEM_CAPABILITIES_MASK) != 0)
+ return;
+
+ i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_GEN_OFFSET,
+ &gen_snap);
+ i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_RELEASE_OFFSET,
+ &release);
+ hw->nvm.oem_ver = (gen_snap << I40E_OEM_SNAP_SHIFT) | release;
+ hw->nvm.eetrack = I40E_OEM_EETRACK_ID;
+}
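The OEM block is located through a pointer word in the shadow RAM, validated, and then packed into a single 32-bit version. A sketch of that walk with a fake shadow RAM standing in for i40e_read_nvm_word(), assuming a 16-bit value for I40E_OEM_SNAP_SHIFT (the array and helper names are illustrative):

#include <stdint.h>
#include <stdio.h>

#define OEM_VERSION_PTR 0x1B
#define OEM_SNAP_SHIFT  16    /* assumed value of I40E_OEM_SNAP_SHIFT */

static uint16_t nvm[0x100];   /* tiny fake shadow RAM */

static uint16_t read_word(uint16_t off)
{
    return off < 0x100 ? nvm[off] : 0xffff;
}

static uint32_t get_oem_version(void)
{
    uint16_t base = read_word(OEM_VERSION_PTR);

    if (base == 0xffff)            /* no OEM block present */
        return 0;
    if (read_word(base + 0) < 3)   /* block too short */
        return 0;
    if (read_word(base + 1) & 0x000f)    /* unexpected capabilities */
        return 0;
    /* pack generation/snapshot and release into one 32-bit version */
    return ((uint32_t)read_word(base + 2) << OEM_SNAP_SHIFT) |
           read_word(base + 3);
}

int main(void)
{
    nvm[OEM_VERSION_PTR] = 0x40;
    nvm[0x40] = 3;        /* length */
    nvm[0x41] = 0;        /* capabilities */
    nvm[0x42] = 0x0102;   /* gen/snap */
    nvm[0x43] = 0x0007;   /* release */
    printf("oem_ver = 0x%08x\n", (unsigned)get_oem_version());
    return 0;
}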
+
+/**
* i40e_reset - wait for core reset to finish reset, reset pf if corer not seen
* @pf: board private structure
**/
@@ -7014,6 +7206,7 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
goto clear_recovery;
}
+ i40e_get_oem_version(&pf->hw);
/* re-verify the eeprom if we just had an EMP reset */
if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state))
@@ -7513,15 +7706,22 @@ static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
**/
static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors)
{
+ struct i40e_ring **next_rings;
int size;
int ret = 0;
- /* allocate memory for both Tx and Rx ring pointers */
- size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs * 2;
+ /* allocate memory for both Tx, XDP Tx and Rx ring pointers */
+ size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs *
+ (i40e_enabled_xdp_vsi(vsi) ? 3 : 2);
vsi->tx_rings = kzalloc(size, GFP_KERNEL);
if (!vsi->tx_rings)
return -ENOMEM;
- vsi->rx_rings = &vsi->tx_rings[vsi->alloc_queue_pairs];
+ next_rings = vsi->tx_rings + vsi->alloc_queue_pairs;
+ if (i40e_enabled_xdp_vsi(vsi)) {
+ vsi->xdp_rings = next_rings;
+ next_rings += vsi->alloc_queue_pairs;
+ }
+ vsi->rx_rings = next_rings;
if (alloc_qvectors) {
/* allocate memory for q_vector pointers */
@@ -7641,6 +7841,7 @@ static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors)
kfree(vsi->tx_rings);
vsi->tx_rings = NULL;
vsi->rx_rings = NULL;
+ vsi->xdp_rings = NULL;
}
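One kzalloc now backs all the ring-pointer arrays, laid out as [Tx | XDP Tx | Rx] when XDP is on and [Tx | Rx] otherwise, which is why freeing vsi->tx_rings alone releases everything and the free path merely NULLs the aliases. A sketch of the carving arithmetic (struct ring and struct vsi here are simplified stand-ins):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct ring { int id; };    /* stand-in for struct i40e_ring */

struct vsi {
    struct ring **tx_rings;
    struct ring **xdp_rings;
    struct ring **rx_rings;
};

static int alloc_ring_arrays(struct vsi *v, int nqp, bool xdp)
{
    struct ring **next;

    /* one allocation holds two or three pointer arrays back to back */
    v->tx_rings = calloc((size_t)nqp * (xdp ? 3 : 2), sizeof(*v->tx_rings));
    if (!v->tx_rings)
        return -1;

    next = v->tx_rings + nqp;
    if (xdp) {
        v->xdp_rings = next;
        next += nqp;
    }
    v->rx_rings = next;
    return 0;
}

int main(void)
{
    struct vsi v = { 0 };

    if (alloc_ring_arrays(&v, 4, true))
        return 1;
    printf("rx starts %td pointers after tx\n", v.rx_rings - v.tx_rings);
    free(v.tx_rings);    /* frees tx, xdp and rx arrays together */
    return 0;
}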
/**
@@ -7724,6 +7925,8 @@ static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)
kfree_rcu(vsi->tx_rings[i], rcu);
vsi->tx_rings[i] = NULL;
vsi->rx_rings[i] = NULL;
+ if (vsi->xdp_rings)
+ vsi->xdp_rings[i] = NULL;
}
}
}
@@ -7734,43 +7937,61 @@ static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)
**/
static int i40e_alloc_rings(struct i40e_vsi *vsi)
{
- struct i40e_ring *tx_ring, *rx_ring;
+ int i, qpv = i40e_enabled_xdp_vsi(vsi) ? 3 : 2;
struct i40e_pf *pf = vsi->back;
- int i;
+ struct i40e_ring *ring;
/* Set basic values in the rings to be used later during open() */
for (i = 0; i < vsi->alloc_queue_pairs; i++) {
/* allocate space for both Tx and Rx in one shot */
- tx_ring = kzalloc(sizeof(struct i40e_ring) * 2, GFP_KERNEL);
- if (!tx_ring)
+ ring = kcalloc(qpv, sizeof(struct i40e_ring), GFP_KERNEL);
+ if (!ring)
goto err_out;
- tx_ring->queue_index = i;
- tx_ring->reg_idx = vsi->base_queue + i;
- tx_ring->ring_active = false;
- tx_ring->vsi = vsi;
- tx_ring->netdev = vsi->netdev;
- tx_ring->dev = &pf->pdev->dev;
- tx_ring->count = vsi->num_desc;
- tx_ring->size = 0;
- tx_ring->dcb_tc = 0;
+ ring->queue_index = i;
+ ring->reg_idx = vsi->base_queue + i;
+ ring->ring_active = false;
+ ring->vsi = vsi;
+ ring->netdev = vsi->netdev;
+ ring->dev = &pf->pdev->dev;
+ ring->count = vsi->num_desc;
+ ring->size = 0;
+ ring->dcb_tc = 0;
if (vsi->back->flags & I40E_FLAG_WB_ON_ITR_CAPABLE)
- tx_ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
- tx_ring->tx_itr_setting = pf->tx_itr_default;
- vsi->tx_rings[i] = tx_ring;
-
- rx_ring = &tx_ring[1];
- rx_ring->queue_index = i;
- rx_ring->reg_idx = vsi->base_queue + i;
- rx_ring->ring_active = false;
- rx_ring->vsi = vsi;
- rx_ring->netdev = vsi->netdev;
- rx_ring->dev = &pf->pdev->dev;
- rx_ring->count = vsi->num_desc;
- rx_ring->size = 0;
- rx_ring->dcb_tc = 0;
- rx_ring->rx_itr_setting = pf->rx_itr_default;
- vsi->rx_rings[i] = rx_ring;
+ ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
+ ring->tx_itr_setting = pf->tx_itr_default;
+ vsi->tx_rings[i] = ring++;
+
+ if (!i40e_enabled_xdp_vsi(vsi))
+ goto setup_rx;
+
+ ring->queue_index = vsi->alloc_queue_pairs + i;
+ ring->reg_idx = vsi->base_queue + ring->queue_index;
+ ring->ring_active = false;
+ ring->vsi = vsi;
+ ring->netdev = NULL;
+ ring->dev = &pf->pdev->dev;
+ ring->count = vsi->num_desc;
+ ring->size = 0;
+ ring->dcb_tc = 0;
+ if (vsi->back->flags & I40E_FLAG_WB_ON_ITR_CAPABLE)
+ ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
+ set_ring_xdp(ring);
+ ring->tx_itr_setting = pf->tx_itr_default;
+ vsi->xdp_rings[i] = ring++;
+
+setup_rx:
+ ring->queue_index = i;
+ ring->reg_idx = vsi->base_queue + i;
+ ring->ring_active = false;
+ ring->vsi = vsi;
+ ring->netdev = vsi->netdev;
+ ring->dev = &pf->pdev->dev;
+ ring->count = vsi->num_desc;
+ ring->size = 0;
+ ring->dcb_tc = 0;
+ ring->rx_itr_setting = pf->rx_itr_default;
+ vsi->rx_rings[i] = ring;
}
return 0;
@@ -8572,10 +8793,10 @@ int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
}
/**
- * i40e_get_npar_bw_setting - Retrieve BW settings for this PF partition
+ * i40e_get_partition_bw_setting - Retrieve BW settings for this PF partition
* @pf: board private structure
**/
-i40e_status i40e_get_npar_bw_setting(struct i40e_pf *pf)
+i40e_status i40e_get_partition_bw_setting(struct i40e_pf *pf)
{
i40e_status status;
bool min_valid, max_valid;
@@ -8586,27 +8807,27 @@ i40e_status i40e_get_npar_bw_setting(struct i40e_pf *pf)
if (!status) {
if (min_valid)
- pf->npar_min_bw = min_bw;
+ pf->min_bw = min_bw;
if (max_valid)
- pf->npar_max_bw = max_bw;
+ pf->max_bw = max_bw;
}
return status;
}
/**
- * i40e_set_npar_bw_setting - Set BW settings for this PF partition
+ * i40e_set_partition_bw_setting - Set BW settings for this PF partition
* @pf: board private structure
**/
-i40e_status i40e_set_npar_bw_setting(struct i40e_pf *pf)
+i40e_status i40e_set_partition_bw_setting(struct i40e_pf *pf)
{
struct i40e_aqc_configure_partition_bw_data bw_data;
i40e_status status;
/* Set the valid bit for this PF */
bw_data.pf_valid_bits = cpu_to_le16(BIT(pf->hw.pf_id));
- bw_data.max_bw[pf->hw.pf_id] = pf->npar_max_bw & I40E_ALT_BW_VALUE_MASK;
- bw_data.min_bw[pf->hw.pf_id] = pf->npar_min_bw & I40E_ALT_BW_VALUE_MASK;
+ bw_data.max_bw[pf->hw.pf_id] = pf->max_bw & I40E_ALT_BW_VALUE_MASK;
+ bw_data.min_bw[pf->hw.pf_id] = pf->min_bw & I40E_ALT_BW_VALUE_MASK;
/* Set the new bandwidths */
status = i40e_aq_configure_partition_bw(&pf->hw, &bw_data, NULL);
@@ -8615,10 +8836,10 @@ i40e_status i40e_set_npar_bw_setting(struct i40e_pf *pf)
}
/**
- * i40e_commit_npar_bw_setting - Commit BW settings for this PF partition
+ * i40e_commit_partition_bw_setting - Commit BW settings for this PF partition
* @pf: board private structure
**/
-i40e_status i40e_commit_npar_bw_setting(struct i40e_pf *pf)
+i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf)
{
/* Commit temporary BW setting to permanent NVM image */
enum i40e_admin_queue_err last_aq_status;
@@ -8737,16 +8958,19 @@ static int i40e_sw_init(struct i40e_pf *pf)
if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.flex10_enable) {
pf->flags |= I40E_FLAG_MFP_ENABLED;
dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
- if (i40e_get_npar_bw_setting(pf))
+ if (i40e_get_partition_bw_setting(pf)) {
dev_warn(&pf->pdev->dev,
- "Could not get NPAR bw settings\n");
- else
+ "Could not get partition bw settings\n");
+ } else {
dev_info(&pf->pdev->dev,
- "Min BW = %8.8x, Max BW = %8.8x\n",
- pf->npar_min_bw, pf->npar_max_bw);
+ "Partition BW Min = %8.8x, Max = %8.8x\n",
+ pf->min_bw, pf->max_bw);
+
+ /* nudge the Tx scheduler */
+ i40e_set_partition_bw_setting(pf);
+ }
}
- /* FW/NVM is not yet fixed in this regard */
if ((pf->hw.func_caps.fd_filters_guaranteed > 0) ||
(pf->hw.func_caps.fd_filters_best_effort > 0)) {
pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
@@ -8849,10 +9073,6 @@ static int i40e_sw_init(struct i40e_pf *pf)
mutex_init(&pf->switch_mutex);
- /* If NPAR is enabled nudge the Tx scheduler */
- if (pf->hw.func_caps.npar_enable && (!i40e_get_npar_bw_setting(pf)))
- i40e_set_npar_bw_setting(pf);
-
sw_init_done:
return err;
}
@@ -9309,6 +9529,72 @@ out_err:
return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}
+/**
+ * i40e_xdp_setup - add/remove an XDP program
+ * @vsi: VSI to change
+ * @prog: XDP program
+ **/
+static int i40e_xdp_setup(struct i40e_vsi *vsi,
+ struct bpf_prog *prog)
+{
+ int frame_size = vsi->netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+ struct i40e_pf *pf = vsi->back;
+ struct bpf_prog *old_prog;
+ bool need_reset;
+ int i;
+
+ /* Don't allow frames that span over multiple buffers */
+ if (frame_size > vsi->rx_buf_len)
+ return -EINVAL;
+
+ if (!i40e_enabled_xdp_vsi(vsi) && !prog)
+ return 0;
+
+ /* When turning XDP on->off/off->on we reset and rebuild the rings. */
+ need_reset = (i40e_enabled_xdp_vsi(vsi) != !!prog);
+
+ if (need_reset)
+ i40e_prep_for_reset(pf, true);
+
+ old_prog = xchg(&vsi->xdp_prog, prog);
+
+ if (need_reset)
+ i40e_reset_and_rebuild(pf, true, true);
+
+ for (i = 0; i < vsi->num_queue_pairs; i++)
+ WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);
+
+ if (old_prog)
+ bpf_prog_put(old_prog);
+
+ return 0;
+}
+
+/**
+ * i40e_xdp - implements ndo_xdp for i40e
+ * @dev: netdevice
+ * @xdp: XDP command
+ **/
+static int i40e_xdp(struct net_device *dev,
+ struct netdev_xdp *xdp)
+{
+ struct i40e_netdev_priv *np = netdev_priv(dev);
+ struct i40e_vsi *vsi = np->vsi;
+
+ if (vsi->type != I40E_VSI_MAIN)
+ return -EINVAL;
+
+ switch (xdp->command) {
+ case XDP_SETUP_PROG:
+ return i40e_xdp_setup(vsi, xdp->prog);
+ case XDP_QUERY_PROG:
+ xdp->prog_attached = i40e_enabled_xdp_vsi(vsi);
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
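The setup path only resets the rings when XDP actually toggles between on and off; a program-to-program swap is just an atomic pointer exchange that the Rx rings then pick up. A userspace sketch of that decision, with C11 atomic_exchange standing in for the kernel's xchg() (the prog values are opaque tokens, and the early-return and refcounting details are elided):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static _Atomic(void *) vsi_prog;    /* stands in for vsi->xdp_prog */

static void swap_prog(void *prog)
{
    bool was_on = atomic_load(&vsi_prog) != NULL;
    bool need_reset = was_on != (prog != NULL);
    void *old;

    if (need_reset)
        printf("prep for reset\n");    /* rings torn down */
    old = atomic_exchange(&vsi_prog, prog);
    if (need_reset)
        printf("rebuild rings\n");     /* rings rebuilt with/without XDP */
    printf("old=%p new=%p\n", old, prog);
    /* the real driver then publishes vsi->xdp_prog to every rx_ring
     * with WRITE_ONCE() and drops the old program's reference */
}

int main(void)
{
    int a, b;

    swap_prog(&a);      /* off -> on: reset */
    swap_prog(&b);      /* on -> on: plain swap */
    swap_prog(NULL);    /* on -> off: reset */
    return 0;
}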
+
static const struct net_device_ops i40e_netdev_ops = {
.ndo_open = i40e_open,
.ndo_stop = i40e_close,
@@ -9341,6 +9627,7 @@ static const struct net_device_ops i40e_netdev_ops = {
.ndo_features_check = i40e_features_check,
.ndo_bridge_getlink = i40e_ndo_bridge_getlink,
.ndo_bridge_setlink = i40e_ndo_bridge_setlink,
+ .ndo_xdp = i40e_xdp,
};
/**
@@ -9909,6 +10196,7 @@ vector_setup_out:
**/
static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
{
+ u16 alloc_queue_pairs;
struct i40e_pf *pf;
u8 enabled_tc;
int ret;
@@ -9927,11 +10215,14 @@ static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
if (ret)
goto err_vsi;
- ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, vsi->idx);
+ alloc_queue_pairs = vsi->alloc_queue_pairs *
+ (i40e_enabled_xdp_vsi(vsi) ? 2 : 1);
+
+ ret = i40e_get_lump(pf, pf->qp_pile, alloc_queue_pairs, vsi->idx);
if (ret < 0) {
dev_info(&pf->pdev->dev,
"failed to get tracking for %d queues for VSI %d err %d\n",
- vsi->alloc_queue_pairs, vsi->seid, ret);
+ alloc_queue_pairs, vsi->seid, ret);
goto err_vsi;
}
vsi->base_queue = ret;
@@ -9987,6 +10278,7 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
{
struct i40e_vsi *vsi = NULL;
struct i40e_veb *veb = NULL;
+ u16 alloc_queue_pairs;
int ret, i;
int v_idx;
@@ -10074,12 +10366,14 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
else if (type == I40E_VSI_SRIOV)
vsi->vf_id = param1;
/* assign it some queues */
- ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs,
- vsi->idx);
+ alloc_queue_pairs = vsi->alloc_queue_pairs *
+ (i40e_enabled_xdp_vsi(vsi) ? 2 : 1);
+
+ ret = i40e_get_lump(pf, pf->qp_pile, alloc_queue_pairs, vsi->idx);
if (ret < 0) {
dev_info(&pf->pdev->dev,
"failed to get tracking for %d queues for VSI %d err=%d\n",
- vsi->alloc_queue_pairs, vsi->seid, ret);
+ alloc_queue_pairs, vsi->seid, ret);
goto err_vsi;
}
vsi->base_queue = ret;
@@ -11097,6 +11391,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_pf_reset;
}
+ i40e_get_oem_version(hw);
/* provide nvm, fw, api versions */
dev_info(&pdev->dev, "fw %d.%d.%05d api %d.%d nvm %s\n",
@@ -11609,11 +11904,8 @@ static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
}
/* shutdown all operations */
- if (!test_bit(__I40E_SUSPENDED, pf->state)) {
- rtnl_lock();
- i40e_prep_for_reset(pf, true);
- rtnl_unlock();
- }
+ if (!test_bit(__I40E_SUSPENDED, pf->state))
+ i40e_prep_for_reset(pf, false);
/* Request a slot reset */
return PCI_ERS_RESULT_NEED_RESET;
@@ -11679,9 +11971,7 @@ static void i40e_pci_error_resume(struct pci_dev *pdev)
if (test_bit(__I40E_SUSPENDED, pf->state))
return;
- rtnl_lock();
- i40e_handle_reset_warning(pf, true);
- rtnl_unlock();
+ i40e_handle_reset_warning(pf, false);
}
/**
@@ -11761,9 +12051,7 @@ static void i40e_shutdown(struct pci_dev *pdev)
if (pf->wol_en && (pf->flags & I40E_FLAG_WOL_MC_MAGIC_PKT_WAKE))
i40e_enable_mc_magic_wake(pf);
- rtnl_lock();
- i40e_prep_for_reset(pf, true);
- rtnl_unlock();
+ i40e_prep_for_reset(pf, false);
wr32(hw, I40E_PFPM_APM,
(pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
@@ -11795,9 +12083,7 @@ static int i40e_suspend(struct pci_dev *pdev, pm_message_t state)
if (pf->wol_en && (pf->flags & I40E_FLAG_WOL_MC_MAGIC_PKT_WAKE))
i40e_enable_mc_magic_wake(pf);
- rtnl_lock();
- i40e_prep_for_reset(pf, true);
- rtnl_unlock();
+ i40e_prep_for_reset(pf, false);
wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
@@ -11843,9 +12129,7 @@ static int i40e_resume(struct pci_dev *pdev)
/* handling the reset will rebuild the device state */
if (test_and_clear_bit(__I40E_SUSPENDED, pf->state)) {
clear_bit(__I40E_DOWN, pf->state);
- rtnl_lock();
- i40e_reset_and_rebuild(pf, false, true);
- rtnl_unlock();
+ i40e_reset_and_rebuild(pf, false, false);
}
return 0;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index c56d976cf85a..df613ea40313 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -29,7 +29,7 @@
#include "i40e_type.h"
#include "i40e_alloc.h"
-#include "i40e_virtchnl.h"
+#include <linux/avf/virtchnl.h>
/* Prototypes for shared code functions that are not in
* the standard function pointer structures. These are
@@ -333,10 +333,10 @@ static inline struct i40e_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype)
/* i40e_common for VF drivers*/
void i40e_vf_parse_hw_config(struct i40e_hw *hw,
- struct i40e_virtchnl_vf_resource *msg);
+ struct virtchnl_vf_resource *msg);
i40e_status i40e_vf_reset(struct i40e_hw *hw);
i40e_status i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
- enum i40e_virtchnl_ops v_opcode,
+ enum virtchnl_ops v_opcode,
i40e_status v_retval,
u8 *msg, u16 msglen,
struct i40e_asq_cmd_details *cmd_details);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index 18c1cc08da97..1a0be835fa06 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -269,6 +269,7 @@ static u32 i40e_ptp_get_rx_events(struct i40e_pf *pf)
/**
* i40e_ptp_rx_hang - Detect error case when Rx timestamp registers are hung
+ * @pf: The PF private data structure
* @vsi: The VSI with the rings relevant to 1588
*
* This watchdog task is scheduled to detect error case where hardware has
@@ -276,9 +277,8 @@ static u32 i40e_ptp_get_rx_events(struct i40e_pf *pf)
* particular error is rare but leaves the device in a state unable to timestamp
* any future packets.
**/
-void i40e_ptp_rx_hang(struct i40e_vsi *vsi)
+void i40e_ptp_rx_hang(struct i40e_pf *pf)
{
- struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
unsigned int i, cleared = 0;
@@ -328,6 +328,36 @@ void i40e_ptp_rx_hang(struct i40e_vsi *vsi)
}
/**
+ * i40e_ptp_tx_hang - Detect error case when Tx timestamp register is hung
+ * @pf: The PF private data structure
+ *
+ * This watchdog task is run periodically to make sure that we clear the Tx
+ * timestamp logic if we don't obtain a timestamp in a reasonable amount of
+ * time. It is unexpected in the normal case, but if it occurs it results in
+ * permanently preventing timestamps of future packets.
+ **/
+void i40e_ptp_tx_hang(struct i40e_pf *pf)
+{
+ if (!(pf->flags & I40E_FLAG_PTP) || !pf->ptp_tx)
+ return;
+
+ /* Nothing to do if we're not already waiting for a timestamp */
+ if (!test_bit(__I40E_PTP_TX_IN_PROGRESS, pf->state))
+ return;
+
+ /* We already have a handler routine which is run when we are notified
+ * of a Tx timestamp in the hardware. If we don't get an interrupt
+ * within a second it is reasonable to assume that we never will.
+ */
+ if (time_is_before_jiffies(pf->ptp_tx_start + HZ)) {
+ dev_kfree_skb_any(pf->ptp_tx_skb);
+ pf->ptp_tx_skb = NULL;
+ clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, pf->state);
+ pf->tx_hwtstamp_timeouts++;
+ }
+}
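The hang check gives up on a pending Tx timestamp after roughly one second so the single in-flight slot cannot stay wedged forever. A sketch of the same timeout pattern with a simulated tick counter (the HZ value and all names are illustrative):

#include <stdbool.h>
#include <stdio.h>

#define HZ 100    /* ticks per second in this sketch */

static unsigned long jiffies;        /* simulated tick counter */
static unsigned long ptp_tx_start;
static bool tx_in_progress;
static unsigned int tx_hwtstamp_timeouts;

/* mirrors i40e_ptp_tx_hang(): give up on a Tx timestamp after ~1s */
static void ptp_tx_hang_check(void)
{
    if (!tx_in_progress)
        return;
    if (jiffies - ptp_tx_start > HZ) {
        tx_in_progress = false;    /* drop the skb, release the lock bit */
        tx_hwtstamp_timeouts++;
    }
}

int main(void)
{
    tx_in_progress = true;
    ptp_tx_start = jiffies = 0;

    jiffies = 50;     /* 0.5s later: still waiting */
    ptp_tx_hang_check();
    jiffies = 150;    /* 1.5s later: declared hung */
    ptp_tx_hang_check();
    printf("timeouts=%u waiting=%d\n", tx_hwtstamp_timeouts, tx_in_progress);
    return 0;
}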
+
+/**
* i40e_ptp_tx_hwtstamp - Utility function which returns the Tx timestamp
* @pf: Board private structure
*
@@ -338,6 +368,7 @@ void i40e_ptp_rx_hang(struct i40e_vsi *vsi)
void i40e_ptp_tx_hwtstamp(struct i40e_pf *pf)
{
struct skb_shared_hwtstamps shhwtstamps;
+ struct sk_buff *skb = pf->ptp_tx_skb;
struct i40e_hw *hw = &pf->hw;
u32 hi, lo;
u64 ns;
@@ -353,12 +384,19 @@ void i40e_ptp_tx_hwtstamp(struct i40e_pf *pf)
hi = rd32(hw, I40E_PRTTSYN_TXTIME_H);
ns = (((u64)hi) << 32) | lo;
-
i40e_ptp_convert_to_hwtstamp(&shhwtstamps, ns);
- skb_tstamp_tx(pf->ptp_tx_skb, &shhwtstamps);
- dev_kfree_skb_any(pf->ptp_tx_skb);
+
+ /* Clear the bit lock as soon as possible after reading the register,
+ * and prior to notifying the stack via skb_tstamp_tx(). Otherwise
+ * applications might wake up and attempt to request another transmit
+ * timestamp prior to the bit lock being cleared.
+ */
pf->ptp_tx_skb = NULL;
clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, pf->state);
+
+ /* Notify the stack and free the skb after we've unlocked */
+ skb_tstamp_tx(skb, &shhwtstamps);
+ dev_kfree_skb_any(skb);
}
/**
@@ -562,6 +600,7 @@ static int i40e_ptp_set_timestamp_mode(struct i40e_pf *pf,
config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
}
break;
+ case HWTSTAMP_FILTER_NTP_ALL:
case HWTSTAMP_FILTER_ALL:
default:
return -ERANGE;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 77115c25d96f..b936febc315a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -26,6 +26,7 @@
#include <linux/prefetch.h>
#include <net/busy_poll.h>
+#include <linux/bpf_trace.h>
#include "i40e.h"
#include "i40e_trace.h"
#include "i40e_prototype.h"
@@ -629,6 +630,8 @@ static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
if (tx_buffer->skb) {
if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
kfree(tx_buffer->raw_buf);
+ else if (ring_is_xdp(ring))
+ page_frag_free(tx_buffer->raw_buf);
else
dev_kfree_skb_any(tx_buffer->skb);
if (dma_unmap_len(tx_buffer, len))
@@ -770,8 +773,11 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
total_bytes += tx_buf->bytecount;
total_packets += tx_buf->gso_segs;
- /* free the skb */
- napi_consume_skb(tx_buf->skb, napi_budget);
+ /* free the skb/XDP data */
+ if (ring_is_xdp(tx_ring))
+ page_frag_free(tx_buf->raw_buf);
+ else
+ napi_consume_skb(tx_buf->skb, napi_budget);
/* unmap skb header data */
dma_unmap_single(tx_ring->dev,
@@ -847,6 +853,9 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
tx_ring->arm_wb = true;
}
+ if (ring_is_xdp(tx_ring))
+ return !!budget;
+
/* notify netdev of completed buffers */
netdev_tx_completed_queue(txring_txq(tx_ring),
total_packets, total_bytes);
@@ -1195,6 +1204,7 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
void i40e_free_rx_resources(struct i40e_ring *rx_ring)
{
i40e_clean_rx_ring(rx_ring);
+ rx_ring->xdp_prog = NULL;
kfree(rx_ring->rx_bi);
rx_ring->rx_bi = NULL;
@@ -1241,6 +1251,8 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
+ rx_ring->xdp_prog = rx_ring->vsi->xdp_prog;
+
return 0;
err:
kfree(rx_ring->rx_bi);
@@ -1593,6 +1605,7 @@ void i40e_process_skb_fields(struct i40e_ring *rx_ring,
* i40e_cleanup_headers - Correct empty headers
* @rx_ring: rx descriptor ring packet is being transacted on
* @skb: pointer to current skb being fixed
+ * @rx_desc: pointer to the EOP Rx descriptor
*
* Also address the case where we are pulling data in on pages only
* and as such no data is present in the skb header.
@@ -1602,8 +1615,25 @@ void i40e_process_skb_fields(struct i40e_ring *rx_ring,
*
* Returns true if an error was encountered and skb was freed.
**/
-static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb)
+static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb,
+ union i40e_rx_desc *rx_desc)
{
+ /* XDP packets use error pointer so abort at this point */
+ if (IS_ERR(skb))
+ return true;
+
+ /* ERR_MASK will only have valid bits if EOP set, and
+ * what we are doing here is actually checking
+ * I40E_RX_DESC_ERROR_RXE_SHIFT, since it is the zeroth bit in
+ * the error field
+ */
+ if (unlikely(i40e_test_staterr(rx_desc,
+ BIT(I40E_RXD_QW1_ERROR_SHIFT)))) {
+ dev_kfree_skb_any(skb);
+ return true;
+ }
+
/* if eth_skb_pad returns an error the skb was freed */
if (eth_skb_pad(skb))
return true;
@@ -1776,7 +1806,7 @@ static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring,
* i40e_construct_skb - Allocate skb and populate it
* @rx_ring: rx descriptor ring to transact packets on
* @rx_buffer: rx buffer to pull data from
- * @size: size of buffer to add to skb
+ * @xdp: xdp_buff pointing to the data
*
* This function allocates an skb. It then populates it with the page
* data from the current receive descriptor, taking care to set up the
@@ -1784,9 +1814,9 @@ static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring,
*/
static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
struct i40e_rx_buffer *rx_buffer,
- unsigned int size)
+ struct xdp_buff *xdp)
{
- void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
+ unsigned int size = xdp->data_end - xdp->data;
#if (PAGE_SIZE < 8192)
unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
#else
@@ -1796,9 +1826,9 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
struct sk_buff *skb;
/* prefetch first cache line of first page */
- prefetch(va);
+ prefetch(xdp->data);
#if L1_CACHE_BYTES < 128
- prefetch(va + L1_CACHE_BYTES);
+ prefetch(xdp->data + L1_CACHE_BYTES);
#endif
/* allocate a skb to store the frags */
@@ -1811,10 +1841,11 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
/* Determine available headroom for copy */
headlen = size;
if (headlen > I40E_RX_HDR_SIZE)
- headlen = eth_get_headlen(va, I40E_RX_HDR_SIZE);
+ headlen = eth_get_headlen(xdp->data, I40E_RX_HDR_SIZE);
/* align pull length to size of long to optimize memcpy performance */
- memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
+ memcpy(__skb_put(skb, headlen), xdp->data,
+ ALIGN(headlen, sizeof(long)));
/* update all of the pointers */
size -= headlen;
@@ -1841,16 +1872,16 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
* i40e_build_skb - Build skb around an existing buffer
* @rx_ring: Rx descriptor ring to transact packets on
* @rx_buffer: Rx buffer to pull data from
- * @size: size of buffer to add to skb
+ * @xdp: xdp_buff pointing to the data
*
* This function builds an skb around an existing Rx buffer, taking care
* to set up the skb correctly and avoid any memcpy overhead.
*/
static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
struct i40e_rx_buffer *rx_buffer,
- unsigned int size)
+ struct xdp_buff *xdp)
{
- void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
+ unsigned int size = xdp->data_end - xdp->data;
#if (PAGE_SIZE < 8192)
unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
#else
@@ -1860,12 +1891,12 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
struct sk_buff *skb;
/* prefetch first cache line of first page */
- prefetch(va);
+ prefetch(xdp->data);
#if L1_CACHE_BYTES < 128
- prefetch(va + L1_CACHE_BYTES);
+ prefetch(xdp->data + L1_CACHE_BYTES);
#endif
/* build an skb around the page buffer */
- skb = build_skb(va - I40E_SKB_PAD, truesize);
+ skb = build_skb(xdp->data_hard_start, truesize);
if (unlikely(!skb))
return NULL;
@@ -1944,6 +1975,75 @@ static bool i40e_is_non_eop(struct i40e_ring *rx_ring,
return true;
}
+#define I40E_XDP_PASS 0
+#define I40E_XDP_CONSUMED 1
+#define I40E_XDP_TX 2
+
+static int i40e_xmit_xdp_ring(struct xdp_buff *xdp,
+ struct i40e_ring *xdp_ring);
+
+/**
+ * i40e_run_xdp - run an XDP program
+ * @rx_ring: Rx ring being processed
+ * @xdp: XDP buffer containing the frame
+ **/
+static struct sk_buff *i40e_run_xdp(struct i40e_ring *rx_ring,
+ struct xdp_buff *xdp)
+{
+ int result = I40E_XDP_PASS;
+ struct i40e_ring *xdp_ring;
+ struct bpf_prog *xdp_prog;
+ u32 act;
+
+ rcu_read_lock();
+ xdp_prog = READ_ONCE(rx_ring->xdp_prog);
+
+ if (!xdp_prog)
+ goto xdp_out;
+
+ act = bpf_prog_run_xdp(xdp_prog, xdp);
+ switch (act) {
+ case XDP_PASS:
+ break;
+ case XDP_TX:
+ xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
+ result = i40e_xmit_xdp_ring(xdp, xdp_ring);
+ break;
+ default:
+ bpf_warn_invalid_xdp_action(act);
+ case XDP_ABORTED:
+ trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
+ /* fallthrough -- handle aborts by dropping packet */
+ case XDP_DROP:
+ result = I40E_XDP_CONSUMED;
+ break;
+ }
+xdp_out:
+ rcu_read_unlock();
+ return ERR_PTR(-result);
+}
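Returning ERR_PTR(-result) lets i40e_run_xdp() reuse the skb return value as a three-way verdict: a real skb means pass to the stack, an error pointer carries the consumed/TX outcome. A simplified sketch of encoding small codes in a pointer the way ERR_PTR/IS_ERR/PTR_ERR do (the kernel reserves the last page of the address space for this; the helpers below are cut-down imitations):

#include <stdint.h>
#include <stdio.h>

#define XDP_PASS_RC     0
#define XDP_CONSUMED_RC 1
#define XDP_TX_RC       2

/* stuff a small negative code into an (invalid) pointer and test for it */
static void *err_ptr(long err)      { return (void *)err; }
static int   is_err(const void *p)  { return (uintptr_t)p >= (uintptr_t)-4095; }
static long  ptr_err(const void *p) { return (long)(intptr_t)p; }

int main(void)
{
    void *verdicts[] = { err_ptr(-XDP_TX_RC), err_ptr(-XDP_CONSUMED_RC) };

    for (int i = 0; i < 2; i++) {
        void *skb = verdicts[i];

        if (is_err(skb))    /* no skb: XDP consumed the frame */
            printf("verdict %ld (%s)\n", -ptr_err(skb),
                   -ptr_err(skb) == XDP_TX_RC ? "tx" : "drop");
    }
    return 0;
}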
+
+/**
+ * i40e_rx_buffer_flip - adjust rx_buffer to point to an unused region
+ * @rx_ring: Rx ring
+ * @rx_buffer: Rx buffer to adjust
+ * @size: Size of adjustment
+ **/
+static void i40e_rx_buffer_flip(struct i40e_ring *rx_ring,
+ struct i40e_rx_buffer *rx_buffer,
+ unsigned int size)
+{
+#if (PAGE_SIZE < 8192)
+ unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
+
+ rx_buffer->page_offset ^= truesize;
+#else
+ unsigned int truesize = SKB_DATA_ALIGN(i40e_rx_offset(rx_ring) + size);
+
+ rx_buffer->page_offset += truesize;
+#endif
+}
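On sub-8K pages each Rx page is split into two half-page buffers, so handing one half to the XDP Tx ring just XORs the offset over to the other half. A quick sketch of that ping-pong:

#include <stdio.h>

int main(void)
{
    unsigned int page_size = 4096;
    unsigned int truesize = page_size / 2;    /* half-page Rx buffers */
    unsigned int page_offset = 0;

    /* each flip hands out the half of the page not currently in use */
    for (int i = 0; i < 4; i++) {
        printf("buffer %d at offset %u\n", i, page_offset);
        page_offset ^= truesize;    /* 0 <-> 2048 */
    }
    return 0;
}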
+
/**
* i40e_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
* @rx_ring: rx descriptor ring to transact packets on
@@ -1961,11 +2061,12 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
struct sk_buff *skb = rx_ring->skb;
u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
- bool failure = false;
+ bool failure = false, xdp_xmit = false;
while (likely(total_rx_packets < budget)) {
struct i40e_rx_buffer *rx_buffer;
union i40e_rx_desc *rx_desc;
+ struct xdp_buff xdp;
unsigned int size;
u16 vlan_tag;
u8 rx_ptype;
@@ -2006,12 +2107,32 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
rx_buffer = i40e_get_rx_buffer(rx_ring, size);
/* retrieve a buffer from the ring */
- if (skb)
+ if (!skb) {
+ xdp.data = page_address(rx_buffer->page) +
+ rx_buffer->page_offset;
+ xdp.data_hard_start = xdp.data -
+ i40e_rx_offset(rx_ring);
+ xdp.data_end = xdp.data + size;
+
+ skb = i40e_run_xdp(rx_ring, &xdp);
+ }
+
+ if (IS_ERR(skb)) {
+ if (PTR_ERR(skb) == -I40E_XDP_TX) {
+ xdp_xmit = true;
+ i40e_rx_buffer_flip(rx_ring, rx_buffer, size);
+ } else {
+ rx_buffer->pagecnt_bias++;
+ }
+ total_rx_bytes += size;
+ total_rx_packets++;
+ } else if (skb) {
i40e_add_rx_frag(rx_ring, rx_buffer, skb, size);
- else if (ring_uses_build_skb(rx_ring))
- skb = i40e_build_skb(rx_ring, rx_buffer, size);
- else
- skb = i40e_construct_skb(rx_ring, rx_buffer, size);
+ } else if (ring_uses_build_skb(rx_ring)) {
+ skb = i40e_build_skb(rx_ring, rx_buffer, &xdp);
+ } else {
+ skb = i40e_construct_skb(rx_ring, rx_buffer, &xdp);
+ }
/* exit if we failed to retrieve a buffer */
if (!skb) {
@@ -2026,18 +2147,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
if (i40e_is_non_eop(rx_ring, rx_desc, skb))
continue;
- /* ERR_MASK will only have valid bits if EOP set, and
- * what we are doing here is actually checking
- * I40E_RX_DESC_ERROR_RXE_SHIFT, since it is the zeroth bit in
- * the error field
- */
- if (unlikely(i40e_test_staterr(rx_desc, BIT(I40E_RXD_QW1_ERROR_SHIFT)))) {
- dev_kfree_skb_any(skb);
- skb = NULL;
- continue;
- }
-
- if (i40e_cleanup_headers(rx_ring, skb)) {
+ if (i40e_cleanup_headers(rx_ring, skb, rx_desc)) {
skb = NULL;
continue;
}
@@ -2063,6 +2173,19 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
total_rx_packets++;
}
+ if (xdp_xmit) {
+ struct i40e_ring *xdp_ring;
+
+ xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
+
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch.
+ */
+ wmb();
+
+ writel(xdp_ring->next_to_use, xdp_ring->tail);
+ }
+
rx_ring->skb = skb;
u64_stats_update_begin(&rx_ring->syncp);
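The wmb() before the tail write guarantees the XDP Tx descriptors are globally visible before the doorbell reaches the device, and doing it once per poll batches the MMIO cost. In portable C11 terms the same publish pattern is a release fence before the store (a loose analogy only; real device MMIO ordering needs the kernel's wmb()):

#include <stdatomic.h>
#include <stdio.h>

static int descriptors[8];    /* stands in for the Tx descriptor ring */
static atomic_uint tail;      /* stands in for the tail doorbell */

static void publish(int next_to_use)
{
    /* make all descriptor writes visible before the doorbell */
    atomic_thread_fence(memory_order_release);
    atomic_store_explicit(&tail, (unsigned)next_to_use,
                          memory_order_relaxed);
}

int main(void)
{
    descriptors[0] = 1;    /* "fill" two descriptors */
    descriptors[1] = 2;
    publish(2);            /* single doorbell for the whole batch */
    printf("tail=%u\n", atomic_load(&tail));
    return 0;
}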
@@ -2629,8 +2752,10 @@ static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
if (pf->ptp_tx &&
!test_and_set_bit_lock(__I40E_PTP_TX_IN_PROGRESS, pf->state)) {
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ pf->ptp_tx_start = jiffies;
pf->ptp_tx_skb = skb_get(skb);
} else {
+ pf->tx_hwtstamp_skipped++;
return 0;
}
@@ -2933,10 +3058,12 @@ bool __i40e_chk_linearize(struct sk_buff *skb)
* @hdr_len: size of the packet header
* @td_cmd: the command field in the descriptor
* @td_offset: offset for checksum or crc
+ *
+ * Returns 0 on success, -1 on DMA mapping failure
**/
-static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
- struct i40e_tx_buffer *first, u32 tx_flags,
- const u8 hdr_len, u32 td_cmd, u32 td_offset)
+static inline int i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
+ struct i40e_tx_buffer *first, u32 tx_flags,
+ const u8 hdr_len, u32 td_cmd, u32 td_offset)
{
unsigned int data_len = skb->data_len;
unsigned int size = skb_headlen(skb);
@@ -3094,7 +3221,7 @@ do_rs:
mmiowb();
}
- return;
+ return 0;
dma_error:
dev_info(tx_ring->dev, "TX DMA map failed\n");
@@ -3111,6 +3238,61 @@ dma_error:
}
tx_ring->next_to_use = i;
+
+ return -1;
+}
+
+/**
+ * i40e_xmit_xdp_ring - transmits an XDP buffer to an XDP Tx ring
+ * @xdp: data to transmit
+ * @xdp_ring: XDP Tx ring
+ **/
+static int i40e_xmit_xdp_ring(struct xdp_buff *xdp,
+ struct i40e_ring *xdp_ring)
+{
+ u32 size = xdp->data_end - xdp->data;
+ u16 i = xdp_ring->next_to_use;
+ struct i40e_tx_buffer *tx_bi;
+ struct i40e_tx_desc *tx_desc;
+ dma_addr_t dma;
+
+ if (unlikely(!I40E_DESC_UNUSED(xdp_ring))) {
+ xdp_ring->tx_stats.tx_busy++;
+ return I40E_XDP_CONSUMED;
+ }
+
+ dma = dma_map_single(xdp_ring->dev, xdp->data, size, DMA_TO_DEVICE);
+ if (dma_mapping_error(xdp_ring->dev, dma))
+ return I40E_XDP_CONSUMED;
+
+ tx_bi = &xdp_ring->tx_bi[i];
+ tx_bi->bytecount = size;
+ tx_bi->gso_segs = 1;
+ tx_bi->raw_buf = xdp->data;
+
+ /* record length, and DMA address */
+ dma_unmap_len_set(tx_bi, len, size);
+ dma_unmap_addr_set(tx_bi, dma, dma);
+
+ tx_desc = I40E_TX_DESC(xdp_ring, i);
+ tx_desc->buffer_addr = cpu_to_le64(dma);
+ tx_desc->cmd_type_offset_bsz = build_ctob(I40E_TX_DESC_CMD_ICRC |
+ I40E_TXD_CMD, 0, size, 0);
+
+ /* Make certain all of the status bits have been updated
+ * before next_to_watch is written.
+ */
+ smp_wmb();
+
+ i++;
+ if (i == xdp_ring->count)
+ i = 0;
+
+ tx_bi->next_to_watch = tx_desc;
+ xdp_ring->next_to_use = i;
+
+ return I40E_XDP_TX;
}
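The transmit helper claims one descriptor, fills it, and wraps next_to_use back to zero at the end of the ring. A minimal ring-index sketch of that bookkeeping (ring size and output are arbitrary):

#include <stdio.h>

#define RING_COUNT 4

static unsigned int next_to_use;

/* claim the next descriptor slot, wrapping at the end of the ring */
static int xmit_one(void)
{
    unsigned int i = next_to_use;

    printf("filled descriptor %u\n", i);
    if (++i == RING_COUNT)
        i = 0;
    next_to_use = i;
    return 0;
}

int main(void)
{
    for (int n = 0; n < 6; n++)
        xmit_one();
    return 0;
}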
/**
@@ -3211,8 +3393,9 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
*/
i40e_atr(tx_ring, skb, tx_flags);
- i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
- td_cmd, td_offset);
+ if (i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
+ td_cmd, td_offset))
+ goto cleanup_tx_tstamp;
return NETDEV_TX_OK;
@@ -3220,6 +3403,15 @@ out_drop:
i40e_trace(xmit_frame_ring_drop, first->skb, tx_ring);
dev_kfree_skb_any(first->skb);
first->skb = NULL;
+cleanup_tx_tstamp:
+ if (unlikely(tx_flags & I40E_TX_FLAGS_TSYN)) {
+ struct i40e_pf *pf = i40e_netdev_to_pf(tx_ring->netdev);
+
+ dev_kfree_skb_any(pf->ptp_tx_skb);
+ pf->ptp_tx_skb = NULL;
+ clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, pf->state);
+ }
+
return NETDEV_TX_OK;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index f5de51124cae..b288d58313a6 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -360,6 +360,7 @@ struct i40e_ring {
void *desc; /* Descriptor ring memory */
struct device *dev; /* Used for DMA mapping */
struct net_device *netdev; /* netdev ring maps to */
+ struct bpf_prog *xdp_prog;
union {
struct i40e_tx_buffer *tx_bi;
struct i40e_rx_buffer *rx_bi;
@@ -395,6 +396,7 @@ struct i40e_ring {
u16 flags;
#define I40E_TXR_FLAGS_WB_ON_ITR BIT(0)
#define I40E_RXR_FLAGS_BUILD_SKB_ENABLED BIT(1)
+#define I40E_TXR_FLAGS_XDP BIT(2)
/* stats structs */
struct i40e_queue_stats stats;
@@ -437,6 +439,16 @@ static inline void clear_ring_build_skb_enabled(struct i40e_ring *ring)
ring->flags &= ~I40E_RXR_FLAGS_BUILD_SKB_ENABLED;
}
+static inline bool ring_is_xdp(struct i40e_ring *ring)
+{
+ return !!(ring->flags & I40E_TXR_FLAGS_XDP);
+}
+
+static inline void set_ring_xdp(struct i40e_ring *ring)
+{
+ ring->flags |= I40E_TXR_FLAGS_XDP;
+}
+
enum i40e_latency_range {
I40E_LOWEST_LATENCY = 0,
I40E_LOW_LATENCY = 1,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
deleted file mode 100644
index 8552192a5bde..000000000000
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
+++ /dev/null
@@ -1,449 +0,0 @@
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
-
-#ifndef _I40E_VIRTCHNL_H_
-#define _I40E_VIRTCHNL_H_
-
-#include "i40e_type.h"
-
-/* Description:
- * This header file describes the VF-PF communication protocol used
- * by the various i40e drivers.
- *
- * Admin queue buffer usage:
- * desc->opcode is always i40e_aqc_opc_send_msg_to_pf
- * flags, retval, datalen, and data addr are all used normally.
- * Firmware copies the cookie fields when sending messages between the PF and
- * VF, but uses all other fields internally. Due to this limitation, we
- * must send all messages as "indirect", i.e. using an external buffer.
- *
- * All the vsi indexes are relative to the VF. Each VF can have maximum of
- * three VSIs. All the queue indexes are relative to the VSI. Each VF can
- * have a maximum of sixteen queues for all of its VSIs.
- *
- * The PF is required to return a status code in v_retval for all messages
- * except RESET_VF, which does not require any response. The return value is of
- * i40e_status_code type, defined in the i40e_type.h.
- *
- * In general, VF driver initialization should roughly follow the order of these
- * opcodes. The VF driver must first validate the API version of the PF driver,
- * then request a reset, then get resources, then configure queues and
- * interrupts. After these operations are complete, the VF driver may start
- * its queues, optionally add MAC and VLAN filters, and process traffic.
- */
-
-/* Opcodes for VF-PF communication. These are placed in the v_opcode field
- * of the virtchnl_msg structure.
- */
-enum i40e_virtchnl_ops {
-/* The PF sends status change events to VFs using
- * the I40E_VIRTCHNL_OP_EVENT opcode.
- * VFs send requests to the PF using the other ops.
- */
- I40E_VIRTCHNL_OP_UNKNOWN = 0,
- I40E_VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */
- I40E_VIRTCHNL_OP_RESET_VF = 2,
- I40E_VIRTCHNL_OP_GET_VF_RESOURCES = 3,
- I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE = 4,
- I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE = 5,
- I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES = 6,
- I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP = 7,
- I40E_VIRTCHNL_OP_ENABLE_QUEUES = 8,
- I40E_VIRTCHNL_OP_DISABLE_QUEUES = 9,
- I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS = 10,
- I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS = 11,
- I40E_VIRTCHNL_OP_ADD_VLAN = 12,
- I40E_VIRTCHNL_OP_DEL_VLAN = 13,
- I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14,
- I40E_VIRTCHNL_OP_GET_STATS = 15,
- I40E_VIRTCHNL_OP_FCOE = 16,
- I40E_VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */
- I40E_VIRTCHNL_OP_IWARP = 20,
- I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP = 21,
- I40E_VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP = 22,
- I40E_VIRTCHNL_OP_CONFIG_RSS_KEY = 23,
- I40E_VIRTCHNL_OP_CONFIG_RSS_LUT = 24,
- I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25,
- I40E_VIRTCHNL_OP_SET_RSS_HENA = 26,
-
-};
-
-/* Virtual channel message descriptor. This overlays the admin queue
- * descriptor. All other data is passed in external buffers.
- */
-
-struct i40e_virtchnl_msg {
- u8 pad[8]; /* AQ flags/opcode/len/retval fields */
- enum i40e_virtchnl_ops v_opcode; /* avoid confusion with desc->opcode */
- i40e_status v_retval; /* ditto for desc->retval */
- u32 vfid; /* used by PF when sending to VF */
-};
-
-/* Message descriptions and data structures.*/
-
-/* I40E_VIRTCHNL_OP_VERSION
- * VF posts its version number to the PF. PF responds with its version number
- * in the same format, along with a return code.
- * Reply from PF has its major/minor versions also in param0 and param1.
- * If there is a major version mismatch, then the VF cannot operate.
- * If there is a minor version mismatch, then the VF can operate but should
- * add a warning to the system log.
- *
- * This enum element MUST always be specified as == 1, regardless of other
- * changes in the API. The PF must always respond to this message without
- * error regardless of version mismatch.
- */
-#define I40E_VIRTCHNL_VERSION_MAJOR 1
-#define I40E_VIRTCHNL_VERSION_MINOR 1
-#define I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS 0
-
-struct i40e_virtchnl_version_info {
- u32 major;
- u32 minor;
-};
-
-/* I40E_VIRTCHNL_OP_RESET_VF
- * VF sends this request to PF with no parameters
- * PF does NOT respond! VF driver must delay then poll VFGEN_RSTAT register
- * until reset completion is indicated. The admin queue must be reinitialized
- * after this operation.
- *
- * When reset is complete, PF must ensure that all queues in all VSIs associated
- * with the VF are stopped, all queue configurations in the HMC are set to 0,
- * and all MAC and VLAN filters (except the default MAC address) on all VSIs
- * are cleared.
- */
-
-/* I40E_VIRTCHNL_OP_GET_VF_RESOURCES
- * Version 1.0 VF sends this request to PF with no parameters
- * Version 1.1 VF sends this request to PF with u32 bitmap of its capabilities
- * PF responds with an indirect message containing
- * i40e_virtchnl_vf_resource and one or more
- * i40e_virtchnl_vsi_resource structures.
- */
-
-struct i40e_virtchnl_vsi_resource {
- u16 vsi_id;
- u16 num_queue_pairs;
- enum i40e_vsi_type vsi_type;
- u16 qset_handle;
- u8 default_mac_addr[ETH_ALEN];
-};
-/* VF offload flags */
-#define I40E_VIRTCHNL_VF_OFFLOAD_L2 0x00000001
-#define I40E_VIRTCHNL_VF_OFFLOAD_IWARP 0x00000002
-#define I40E_VIRTCHNL_VF_OFFLOAD_FCOE 0x00000004
-#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ 0x00000008
-#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG 0x00000010
-#define I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR 0x00000020
-#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000
-#define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000
-#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000
-#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF 0X00080000
-#define I40E_VIRTCHNL_VF_OFFLOAD_ENCAP 0X00100000
-#define I40E_VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM 0X00200000
-
-#define I40E_VF_BASE_MODE_OFFLOADS (I40E_VIRTCHNL_VF_OFFLOAD_L2 | \
- I40E_VIRTCHNL_VF_OFFLOAD_VLAN | \
- I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF)
-
-struct i40e_virtchnl_vf_resource {
- u16 num_vsis;
- u16 num_queue_pairs;
- u16 max_vectors;
- u16 max_mtu;
-
- u32 vf_offload_flags;
- u32 rss_key_size;
- u32 rss_lut_size;
-
- struct i40e_virtchnl_vsi_resource vsi_res[1];
-};
-
-/* I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE
- * VF sends this message to set up parameters for one TX queue.
- * External data buffer contains one instance of i40e_virtchnl_txq_info.
- * PF configures requested queue and returns a status code.
- */
-
-/* Tx queue config info */
-struct i40e_virtchnl_txq_info {
- u16 vsi_id;
- u16 queue_id;
- u16 ring_len; /* number of descriptors, multiple of 8 */
- u16 headwb_enabled;
- u64 dma_ring_addr;
- u64 dma_headwb_addr;
-};
-
-/* I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE
- * VF sends this message to set up parameters for one RX queue.
- * External data buffer contains one instance of i40e_virtchnl_rxq_info.
- * PF configures requested queue and returns a status code.
- */
-
-/* Rx queue config info */
-struct i40e_virtchnl_rxq_info {
- u16 vsi_id;
- u16 queue_id;
- u32 ring_len; /* number of descriptors, multiple of 32 */
- u16 hdr_size;
- u16 splithdr_enabled;
- u32 databuffer_size;
- u32 max_pkt_size;
- u64 dma_ring_addr;
- enum i40e_hmc_obj_rx_hsplit_0 rx_split_pos;
-};
-
-/* I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES
- * VF sends this message to set parameters for all active TX and RX queues
- * associated with the specified VSI.
- * PF configures queues and returns status.
- * If the number of queues specified is greater than the number of queues
- * associated with the VSI, an error is returned and no queues are configured.
- */
-struct i40e_virtchnl_queue_pair_info {
- /* NOTE: vsi_id and queue_id should be identical for both queues. */
- struct i40e_virtchnl_txq_info txq;
- struct i40e_virtchnl_rxq_info rxq;
-};
-
-struct i40e_virtchnl_vsi_queue_config_info {
- u16 vsi_id;
- u16 num_queue_pairs;
- struct i40e_virtchnl_queue_pair_info qpair[1];
-};
-
-/* I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP
- * VF uses this message to map vectors to queues.
- * The rxq_map and txq_map fields are bitmaps used to indicate which queues
- * are to be associated with the specified vector.
- * The "other" causes are always mapped to vector 0.
- * PF configures interrupt mapping and returns status.
- */
-struct i40e_virtchnl_vector_map {
- u16 vsi_id;
- u16 vector_id;
- u16 rxq_map;
- u16 txq_map;
- u16 rxitr_idx;
- u16 txitr_idx;
-};
-
-struct i40e_virtchnl_irq_map_info {
- u16 num_vectors;
- struct i40e_virtchnl_vector_map vecmap[1];
-};
-
-/* I40E_VIRTCHNL_OP_ENABLE_QUEUES
- * I40E_VIRTCHNL_OP_DISABLE_QUEUES
- * VF sends these message to enable or disable TX/RX queue pairs.
- * The queues fields are bitmaps indicating which queues to act upon.
- * (Currently, we only support 16 queues per VF, but we make the field
- * u32 to allow for expansion.)
- * PF performs requested action and returns status.
- */
-struct i40e_virtchnl_queue_select {
- u16 vsi_id;
- u16 pad;
- u32 rx_queues;
- u32 tx_queues;
-};
-
-/* I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS
- * VF sends this message in order to add one or more unicast or multicast
- * address filters for the specified VSI.
- * PF adds the filters and returns status.
- */
-
-/* I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS
- * VF sends this message in order to remove one or more unicast or multicast
- * filters for the specified VSI.
- * PF removes the filters and returns status.
- */
-
-struct i40e_virtchnl_ether_addr {
- u8 addr[ETH_ALEN];
- u8 pad[2];
-};
-
-struct i40e_virtchnl_ether_addr_list {
- u16 vsi_id;
- u16 num_elements;
- struct i40e_virtchnl_ether_addr list[1];
-};
-
-/* I40E_VIRTCHNL_OP_ADD_VLAN
- * VF sends this message to add one or more VLAN tag filters for receives.
- * PF adds the filters and returns status.
- * If a port VLAN is configured by the PF, this operation will return an
- * error to the VF.
- */
-
-/* I40E_VIRTCHNL_OP_DEL_VLAN
- * VF sends this message to remove one or more VLAN tag filters for receives.
- * PF removes the filters and returns status.
- * If a port VLAN is configured by the PF, this operation will return an
- * error to the VF.
- */
-
-struct i40e_virtchnl_vlan_filter_list {
- u16 vsi_id;
- u16 num_elements;
- u16 vlan_id[1];
-};
-
-/* I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE
- * VF sends VSI id and flags.
- * PF returns status code in retval.
- * Note: we assume that broadcast accept mode is always enabled.
- */
-struct i40e_virtchnl_promisc_info {
- u16 vsi_id;
- u16 flags;
-};
-
-#define I40E_FLAG_VF_UNICAST_PROMISC 0x00000001
-#define I40E_FLAG_VF_MULTICAST_PROMISC 0x00000002
-
-/* I40E_VIRTCHNL_OP_GET_STATS
- * VF sends this message to request stats for the selected VSI. VF uses
- * the i40e_virtchnl_queue_select struct to specify the VSI. The queue_id
- * field is ignored by the PF.
- *
- * PF replies with struct i40e_eth_stats in an external buffer.
- */
-
-/* I40E_VIRTCHNL_OP_CONFIG_RSS_KEY
- * I40E_VIRTCHNL_OP_CONFIG_RSS_LUT
- * VF sends these messages to configure RSS. Only supported if both PF
- * and VF drivers set the I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF bit during
- * configuration negotiation. If this is the case, then the RSS fields in
- * the VF resource struct are valid.
- * Both the key and LUT are initialized to 0 by the PF, meaning that
- * RSS is effectively disabled until set up by the VF.
- */
-struct i40e_virtchnl_rss_key {
- u16 vsi_id;
- u16 key_len;
- u8 key[1]; /* RSS hash key, packed bytes */
-};
-
-struct i40e_virtchnl_rss_lut {
- u16 vsi_id;
- u16 lut_entries;
- u8 lut[1]; /* RSS lookup table*/
-};
-
-/* I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS
- * I40E_VIRTCHNL_OP_SET_RSS_HENA
- * VF sends these messages to get and set the hash filter enable bits for RSS.
- * By default, the PF sets these to all possible traffic types that the
- * hardware supports. The VF can query this value if it wants to change the
- * traffic types that are hashed by the hardware.
- * Traffic types are defined in the i40e_filter_pctype enum in i40e_type.h
- */
-struct i40e_virtchnl_rss_hena {
- u64 hena;
-};
-
-/* I40E_VIRTCHNL_OP_EVENT
- * PF sends this message to inform the VF driver of events that may affect it.
- * No direct response is expected from the VF, though it may generate other
- * messages in response to this one.
- */
-enum i40e_virtchnl_event_codes {
- I40E_VIRTCHNL_EVENT_UNKNOWN = 0,
- I40E_VIRTCHNL_EVENT_LINK_CHANGE,
- I40E_VIRTCHNL_EVENT_RESET_IMPENDING,
- I40E_VIRTCHNL_EVENT_PF_DRIVER_CLOSE,
-};
-#define I40E_PF_EVENT_SEVERITY_INFO 0
-#define I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM 255
-
-struct i40e_virtchnl_pf_event {
- enum i40e_virtchnl_event_codes event;
- union {
- struct {
- enum i40e_aq_link_speed link_speed;
- bool link_status;
- } link_event;
- } event_data;
-
- int severity;
-};
-
-/* I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP
- * VF uses this message to request PF to map IWARP vectors to IWARP queues.
- * The request for this originates from the VF IWARP driver through
- * a client interface between VF LAN and VF IWARP driver.
- * A vector could have an AEQ and CEQ attached to it although
- * there is a single AEQ per VF IWARP instance in which case
- * most vectors will have an INVALID_IDX for aeq and valid idx for ceq.
- * There will never be a case where there will be multiple CEQs attached
- * to a single vector.
- * PF configures interrupt mapping and returns status.
- */
-
-/* HW does not define a type value for AEQ; only for RX/TX and CEQ.
- * In order for us to keep the interface simple, SW will define a
- * unique type value for AEQ.
- */
-#define I40E_QUEUE_TYPE_PE_AEQ 0x80
-#define I40E_QUEUE_INVALID_IDX 0xFFFF
-
-struct i40e_virtchnl_iwarp_qv_info {
- u32 v_idx; /* msix_vector */
- u16 ceq_idx;
- u16 aeq_idx;
- u8 itr_idx;
-};
-
-struct i40e_virtchnl_iwarp_qvlist_info {
- u32 num_vectors;
- struct i40e_virtchnl_iwarp_qv_info qv_info[1];
-};
-
-/* VF reset states - these are written into the RSTAT register:
- * I40E_VFGEN_RSTAT1 on the PF
- * I40E_VFGEN_RSTAT on the VF
- * When the PF initiates a reset, it writes 0
- * When the reset is complete, it writes 1
- * When the PF detects that the VF has recovered, it writes 2
- * VF checks this register periodically to determine if a reset has occurred,
- * then polls it to know when the reset is complete.
- * If either the PF or VF reads the register while the hardware
- * is in a reset state, it will return DEADBEEF, which, when masked,
- * will result in 3.
- */
-enum i40e_vfr_states {
- I40E_VFR_INPROGRESS = 0,
- I40E_VFR_COMPLETED,
- I40E_VFR_VFACTIVE,
- I40E_VFR_UNKNOWN,
-};
-
-#endif /* _I40E_VIRTCHNL_H_ */
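
A sketch of the VF-side poll that the reset-state comment above describes (register and mask names are from the i40e register definitions; the retry count, delay, and helper name are illustrative):

/* Sketch: poll VFGEN_RSTAT until the PF marks the reset complete. */
static int i40e_vf_wait_reset_done(struct i40e_hw *hw)
{
	u32 rstat;
	int i;

	for (i = 0; i < 100; i++) {
		rstat = rd32(hw, I40E_VFGEN_RSTAT) &
			I40E_VFGEN_RSTAT_VFR_STATE_MASK;
		if (rstat == I40E_VFR_COMPLETED || rstat == I40E_VFR_VFACTIVE)
			return 0;
		usleep_range(10, 20);
	}
	return -EBUSY;	/* still in progress (or 3: read during reset) */
}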
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 0fb38ca78900..ecbe40ea8ffe 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -39,7 +39,7 @@
* send a message to all VFs on a given PF
**/
static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
- enum i40e_virtchnl_ops v_opcode,
+ enum virtchnl_ops v_opcode,
i40e_status v_retval, u8 *msg,
u16 msglen)
{
@@ -70,14 +70,14 @@ static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
**/
static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf)
{
- struct i40e_virtchnl_pf_event pfe;
+ struct virtchnl_pf_event pfe;
struct i40e_pf *pf = vf->pf;
struct i40e_hw *hw = &pf->hw;
struct i40e_link_status *ls = &pf->hw.phy.link_info;
int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;
- pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;
- pfe.severity = I40E_PF_EVENT_SEVERITY_INFO;
+ pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
+ pfe.severity = PF_EVENT_SEVERITY_INFO;
if (vf->link_forced) {
pfe.event_data.link_event.link_status = vf->link_up;
pfe.event_data.link_event.link_speed =
@@ -85,9 +85,10 @@ static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf)
} else {
pfe.event_data.link_event.link_status =
ls->link_info & I40E_AQ_LINK_UP;
- pfe.event_data.link_event.link_speed = ls->link_speed;
+ pfe.event_data.link_event.link_speed =
+ (enum virtchnl_link_speed)ls->link_speed;
}
- i40e_aq_send_msg_to_vf(hw, abs_vf_id, I40E_VIRTCHNL_OP_EVENT,
+ i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
0, (u8 *)&pfe, sizeof(pfe), NULL);
}
@@ -113,12 +114,12 @@ void i40e_vc_notify_link_state(struct i40e_pf *pf)
**/
void i40e_vc_notify_reset(struct i40e_pf *pf)
{
- struct i40e_virtchnl_pf_event pfe;
+ struct virtchnl_pf_event pfe;
- pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING;
- pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM;
- i40e_vc_vf_broadcast(pf, I40E_VIRTCHNL_OP_EVENT, 0,
- (u8 *)&pfe, sizeof(struct i40e_virtchnl_pf_event));
+ pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
+ pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
+ i40e_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, 0,
+ (u8 *)&pfe, sizeof(struct virtchnl_pf_event));
}
/**
@@ -129,7 +130,7 @@ void i40e_vc_notify_reset(struct i40e_pf *pf)
**/
void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
{
- struct i40e_virtchnl_pf_event pfe;
+ struct virtchnl_pf_event pfe;
int abs_vf_id;
/* validate the request */
@@ -143,11 +144,11 @@ void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
abs_vf_id = vf->vf_id + (int)vf->pf->hw.func_caps.vf_base_id;
- pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING;
- pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM;
- i40e_aq_send_msg_to_vf(&vf->pf->hw, abs_vf_id, I40E_VIRTCHNL_OP_EVENT,
+ pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
+ pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
+ i40e_aq_send_msg_to_vf(&vf->pf->hw, abs_vf_id, VIRTCHNL_OP_EVENT,
0, (u8 *)&pfe,
- sizeof(struct i40e_virtchnl_pf_event), NULL);
+ sizeof(struct virtchnl_pf_event), NULL);
}
/***********************misc routines*****************************/
@@ -250,7 +251,7 @@ static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u16 vsi_id,
* configure irq link list from the map
**/
static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id,
- struct i40e_virtchnl_vector_map *vecmap)
+ struct virtchnl_vector_map *vecmap)
{
unsigned long linklistmap = 0, tempmap;
struct i40e_pf *pf = vf->pf;
@@ -338,7 +339,7 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id,
/* if the vf is running in polling mode and using interrupt zero,
* need to disable auto-mask on enabling zero interrupt for VFs.
*/
- if ((vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING) &&
+ if ((vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) &&
(vector_id == 0)) {
reg = rd32(hw, I40E_GLINT_CTL);
if (!(reg & I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK)) {
@@ -359,7 +360,7 @@ irq_list_done:
static void i40e_release_iwarp_qvlist(struct i40e_vf *vf)
{
struct i40e_pf *pf = vf->pf;
- struct i40e_virtchnl_iwarp_qvlist_info *qvlist_info = vf->qvlist_info;
+ struct virtchnl_iwarp_qvlist_info *qvlist_info = vf->qvlist_info;
u32 msix_vf;
u32 i;
@@ -368,7 +369,7 @@ static void i40e_release_iwarp_qvlist(struct i40e_vf *vf)
msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
for (i = 0; i < qvlist_info->num_vectors; i++) {
- struct i40e_virtchnl_iwarp_qv_info *qv_info;
+ struct virtchnl_iwarp_qv_info *qv_info;
u32 next_q_index, next_q_type;
struct i40e_hw *hw = &pf->hw;
u32 v_idx, reg_idx, reg;
@@ -409,17 +410,17 @@ static void i40e_release_iwarp_qvlist(struct i40e_vf *vf)
* Return 0 on success or < 0 on error
**/
static int i40e_config_iwarp_qvlist(struct i40e_vf *vf,
- struct i40e_virtchnl_iwarp_qvlist_info *qvlist_info)
+ struct virtchnl_iwarp_qvlist_info *qvlist_info)
{
struct i40e_pf *pf = vf->pf;
struct i40e_hw *hw = &pf->hw;
- struct i40e_virtchnl_iwarp_qv_info *qv_info;
+ struct virtchnl_iwarp_qv_info *qv_info;
u32 v_idx, i, reg_idx, reg;
u32 next_q_idx, next_q_type;
u32 msix_vf, size;
- size = sizeof(struct i40e_virtchnl_iwarp_qvlist_info) +
- (sizeof(struct i40e_virtchnl_iwarp_qv_info) *
+ size = sizeof(struct virtchnl_iwarp_qvlist_info) +
+ (sizeof(struct virtchnl_iwarp_qv_info) *
(qvlist_info->num_vectors - 1));
vf->qvlist_info = kzalloc(size, GFP_KERNEL);
vf->qvlist_info->num_vectors = qvlist_info->num_vectors;
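
The size arithmetic above accounts for qvlist_info ending in a one-element array, so num_vectors - 1 extra entries are appended. A defensive variant of the allocation, with a NULL check that is an addition here and not part of this patch, might read:

	size = sizeof(struct virtchnl_iwarp_qvlist_info) +
	       (sizeof(struct virtchnl_iwarp_qv_info) *
		(qvlist_info->num_vectors - 1));
	vf->qvlist_info = kzalloc(size, GFP_KERNEL);
	if (!vf->qvlist_info)
		return -ENOMEM;
	vf->qvlist_info->num_vectors = qvlist_info->num_vectors;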
@@ -492,7 +493,7 @@ err:
**/
static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_id,
u16 vsi_queue_id,
- struct i40e_virtchnl_txq_info *info)
+ struct virtchnl_txq_info *info)
{
struct i40e_pf *pf = vf->pf;
struct i40e_hw *hw = &pf->hw;
@@ -569,7 +570,7 @@ error_context:
**/
static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id,
u16 vsi_queue_id,
- struct i40e_virtchnl_rxq_info *info)
+ struct virtchnl_rxq_info *info)
{
struct i40e_pf *pf = vf->pf;
struct i40e_hw *hw = &pf->hw;
@@ -1017,7 +1018,7 @@ static void i40e_cleanup_reset_vf(struct i40e_vf *vf)
* after VF has been fully initialized, because the VF driver may
* request resources immediately after setting this flag.
*/
- wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE);
+ wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
}
/**
@@ -1461,7 +1462,7 @@ static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
* send resp msg to VF
**/
static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf,
- enum i40e_virtchnl_ops opcode,
+ enum virtchnl_ops opcode,
i40e_status retval)
{
return i40e_vc_send_msg_to_vf(vf, opcode, retval, NULL, 0);
@@ -1475,18 +1476,17 @@ static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf,
**/
static int i40e_vc_get_version_msg(struct i40e_vf *vf, u8 *msg)
{
- struct i40e_virtchnl_version_info info = {
- I40E_VIRTCHNL_VERSION_MAJOR, I40E_VIRTCHNL_VERSION_MINOR
+ struct virtchnl_version_info info = {
+ VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR
};
- vf->vf_ver = *(struct i40e_virtchnl_version_info *)msg;
+ vf->vf_ver = *(struct virtchnl_version_info *)msg;
/* VFs running the 1.0 API expect to get 1.0 back or they will cry. */
- if (VF_IS_V10(vf))
- info.minor = I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
- return i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_VERSION,
+ if (VF_IS_V10(&vf->vf_ver))
+ info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
+ return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
I40E_SUCCESS, (u8 *)&info,
- sizeof(struct
- i40e_virtchnl_version_info));
+ sizeof(struct virtchnl_version_info));
}
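
VF_IS_V10/VF_IS_V11 now come from include/linux/avf/virtchnl.h and take a pointer to the version struct rather than to the VF, hence the &vf->vf_ver change above. Their shape is roughly:

#define VF_IS_V10(_v) (((_v)->major == 1) && ((_v)->minor == 0))
#define VF_IS_V11(_v) (((_v)->major == 1) && ((_v)->minor == 1))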
/**
@@ -1499,7 +1499,7 @@ static int i40e_vc_get_version_msg(struct i40e_vf *vf, u8 *msg)
**/
static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
{
- struct i40e_virtchnl_vf_resource *vfres = NULL;
+ struct virtchnl_vf_resource *vfres = NULL;
struct i40e_pf *pf = vf->pf;
i40e_status aq_ret = 0;
struct i40e_vsi *vsi;
@@ -1512,8 +1512,8 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
goto err;
}
- len = (sizeof(struct i40e_virtchnl_vf_resource) +
- sizeof(struct i40e_virtchnl_vsi_resource) * num_vsis);
+ len = (sizeof(struct virtchnl_vf_resource) +
+ sizeof(struct virtchnl_vsi_resource) * num_vsis);
vfres = kzalloc(len, GFP_KERNEL);
if (!vfres) {
@@ -1521,50 +1521,48 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
len = 0;
goto err;
}
- if (VF_IS_V11(vf))
+ if (VF_IS_V11(&vf->vf_ver))
vf->driver_caps = *(u32 *)msg;
else
- vf->driver_caps = I40E_VIRTCHNL_VF_OFFLOAD_L2 |
- I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG |
- I40E_VIRTCHNL_VF_OFFLOAD_VLAN;
+ vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 |
+ VIRTCHNL_VF_OFFLOAD_RSS_REG |
+ VIRTCHNL_VF_OFFLOAD_VLAN;
- vfres->vf_offload_flags = I40E_VIRTCHNL_VF_OFFLOAD_L2;
+ vfres->vf_offload_flags = VIRTCHNL_VF_OFFLOAD_L2;
vsi = pf->vsi[vf->lan_vsi_idx];
if (!vsi->info.pvid)
- vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_VLAN;
+ vfres->vf_offload_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;
if (i40e_vf_client_capable(pf, vf->vf_id) &&
- (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_IWARP)) {
- vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_IWARP;
+ (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_IWARP)) {
+ vfres->vf_offload_flags |= VIRTCHNL_VF_OFFLOAD_IWARP;
set_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states);
}
- if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF) {
- vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF;
+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
+ vfres->vf_offload_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
} else {
if ((pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) &&
- (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ))
- vfres->vf_offload_flags |=
- I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ;
+ (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ))
+ vfres->vf_offload_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ;
else
- vfres->vf_offload_flags |=
- I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG;
+ vfres->vf_offload_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
}
if (pf->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) {
- if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
vfres->vf_offload_flags |=
- I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
+ VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
}
- if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_ENCAP)
- vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_ENCAP;
+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP)
+ vfres->vf_offload_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP;
if ((pf->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE) &&
- (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM))
- vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;
+ (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM))
+ vfres->vf_offload_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;
- if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING) {
+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) {
if (pf->flags & I40E_FLAG_MFP_ENABLED) {
dev_err(&pf->pdev->dev,
"VF %d requested polling mode: this feature is supported only when the device is running in single function per port (SFP) mode\n",
@@ -1572,13 +1570,13 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
ret = I40E_ERR_PARAM;
goto err;
}
- vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING;
+ vfres->vf_offload_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING;
}
if (pf->flags & I40E_FLAG_WB_ON_ITR_CAPABLE) {
- if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
vfres->vf_offload_flags |=
- I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
+ VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
}
vfres->num_vsis = num_vsis;
@@ -1589,7 +1587,7 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
if (vf->lan_vsi_idx) {
vfres->vsi_res[0].vsi_id = vf->lan_vsi_id;
- vfres->vsi_res[0].vsi_type = I40E_VSI_SRIOV;
+ vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
vfres->vsi_res[0].num_queue_pairs = vsi->alloc_queue_pairs;
/* VFs only use TC 0 */
vfres->vsi_res[0].qset_handle
@@ -1601,7 +1599,7 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
err:
/* send the response back to the VF */
- ret = i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
+ ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES,
aq_ret, (u8 *)vfres, len);
kfree(vfres);
@@ -1655,8 +1653,8 @@ static inline int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi)
static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
u8 *msg, u16 msglen)
{
- struct i40e_virtchnl_promisc_info *info =
- (struct i40e_virtchnl_promisc_info *)msg;
+ struct virtchnl_promisc_info *info =
+ (struct virtchnl_promisc_info *)msg;
struct i40e_pf *pf = vf->pf;
struct i40e_hw *hw = &pf->hw;
struct i40e_mac_filter *f;
@@ -1683,7 +1681,7 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
goto error_param;
}
/* Multicast promiscuous handling */
- if (info->flags & I40E_FLAG_VF_MULTICAST_PROMISC)
+ if (info->flags & FLAG_VF_MULTICAST_PROMISC)
allmulti = true;
if (vf->port_vlan_id) {
@@ -1734,7 +1732,7 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
clear_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states);
}
- if (info->flags & I40E_FLAG_VF_UNICAST_PROMISC)
+ if (info->flags & FLAG_VF_UNICAST_PROMISC)
alluni = true;
if (vf->port_vlan_id) {
aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, vsi->seid,
@@ -1788,7 +1786,7 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
error_param:
/* send the response to the VF */
return i40e_vc_send_resp_to_vf(vf,
- I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
+ VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
aq_ret);
}
@@ -1803,9 +1801,9 @@ error_param:
**/
static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
- struct i40e_virtchnl_vsi_queue_config_info *qci =
- (struct i40e_virtchnl_vsi_queue_config_info *)msg;
- struct i40e_virtchnl_queue_pair_info *qpi;
+ struct virtchnl_vsi_queue_config_info *qci =
+ (struct virtchnl_vsi_queue_config_info *)msg;
+ struct virtchnl_queue_pair_info *qpi;
struct i40e_pf *pf = vf->pf;
u16 vsi_id, vsi_queue_id;
i40e_status aq_ret = 0;
@@ -1845,7 +1843,7 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
error_param:
/* send the response to the VF */
- return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
+ return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
aq_ret);
}
@@ -1860,9 +1858,9 @@ error_param:
**/
static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
- struct i40e_virtchnl_irq_map_info *irqmap_info =
- (struct i40e_virtchnl_irq_map_info *)msg;
- struct i40e_virtchnl_vector_map *map;
+ struct virtchnl_irq_map_info *irqmap_info =
+ (struct virtchnl_irq_map_info *)msg;
+ struct virtchnl_vector_map *map;
u16 vsi_id, vsi_queue_id, vector_id;
i40e_status aq_ret = 0;
unsigned long tempmap;
@@ -1908,7 +1906,7 @@ static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
}
error_param:
/* send the response to the VF */
- return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
+ return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP,
aq_ret);
}
@@ -1922,8 +1920,8 @@ error_param:
**/
static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
- struct i40e_virtchnl_queue_select *vqs =
- (struct i40e_virtchnl_queue_select *)msg;
+ struct virtchnl_queue_select *vqs =
+ (struct virtchnl_queue_select *)msg;
struct i40e_pf *pf = vf->pf;
u16 vsi_id = vqs->vsi_id;
i40e_status aq_ret = 0;
@@ -1947,7 +1945,7 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
aq_ret = I40E_ERR_TIMEOUT;
error_param:
/* send the response to the VF */
- return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
+ return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES,
aq_ret);
}
@@ -1962,8 +1960,8 @@ error_param:
**/
static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
- struct i40e_virtchnl_queue_select *vqs =
- (struct i40e_virtchnl_queue_select *)msg;
+ struct virtchnl_queue_select *vqs =
+ (struct virtchnl_queue_select *)msg;
struct i40e_pf *pf = vf->pf;
i40e_status aq_ret = 0;
@@ -1986,7 +1984,7 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
error_param:
/* send the response to the VF */
- return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
+ return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES,
aq_ret);
}
@@ -2000,8 +1998,8 @@ error_param:
**/
static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
- struct i40e_virtchnl_queue_select *vqs =
- (struct i40e_virtchnl_queue_select *)msg;
+ struct virtchnl_queue_select *vqs =
+ (struct virtchnl_queue_select *)msg;
struct i40e_pf *pf = vf->pf;
struct i40e_eth_stats stats;
i40e_status aq_ret = 0;
@@ -2029,7 +2027,7 @@ static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
error_param:
/* send the response back to the VF */
- return i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_STATS, aq_ret,
+ return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, aq_ret,
(u8 *)&stats, sizeof(stats));
}
@@ -2088,8 +2086,8 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf, u8 *macaddr)
**/
static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
- struct i40e_virtchnl_ether_addr_list *al =
- (struct i40e_virtchnl_ether_addr_list *)msg;
+ struct virtchnl_ether_addr_list *al =
+ (struct virtchnl_ether_addr_list *)msg;
struct i40e_pf *pf = vf->pf;
struct i40e_vsi *vsi = NULL;
u16 vsi_id = al->vsi_id;
@@ -2143,7 +2141,7 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
error_param:
/* send the response to the VF */
- return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
+ return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR,
ret);
}
@@ -2157,8 +2155,8 @@ error_param:
**/
static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
- struct i40e_virtchnl_ether_addr_list *al =
- (struct i40e_virtchnl_ether_addr_list *)msg;
+ struct virtchnl_ether_addr_list *al =
+ (struct virtchnl_ether_addr_list *)msg;
struct i40e_pf *pf = vf->pf;
struct i40e_vsi *vsi = NULL;
u16 vsi_id = al->vsi_id;
@@ -2203,7 +2201,7 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
error_param:
/* send the response to the VF */
- return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
+ return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR,
ret);
}
@@ -2217,8 +2215,8 @@ error_param:
**/
static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
- struct i40e_virtchnl_vlan_filter_list *vfl =
- (struct i40e_virtchnl_vlan_filter_list *)msg;
+ struct virtchnl_vlan_filter_list *vfl =
+ (struct virtchnl_vlan_filter_list *)msg;
struct i40e_pf *pf = vf->pf;
struct i40e_vsi *vsi = NULL;
u16 vsi_id = vfl->vsi_id;
@@ -2277,7 +2275,7 @@ static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
error_param:
/* send the response to the VF */
- return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ADD_VLAN, aq_ret);
+ return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, aq_ret);
}
/**
@@ -2290,8 +2288,8 @@ error_param:
**/
static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
- struct i40e_virtchnl_vlan_filter_list *vfl =
- (struct i40e_virtchnl_vlan_filter_list *)msg;
+ struct virtchnl_vlan_filter_list *vfl =
+ (struct virtchnl_vlan_filter_list *)msg;
struct i40e_pf *pf = vf->pf;
struct i40e_vsi *vsi = NULL;
u16 vsi_id = vfl->vsi_id;
@@ -2335,7 +2333,7 @@ static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
error_param:
/* send the response to the VF */
- return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DEL_VLAN, aq_ret);
+ return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, aq_ret);
}
/**
@@ -2363,7 +2361,7 @@ static int i40e_vc_iwarp_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
error_param:
/* send the response to the VF */
- return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_IWARP,
+ return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_IWARP,
aq_ret);
}
@@ -2379,8 +2377,8 @@ error_param:
static int i40e_vc_iwarp_qvmap_msg(struct i40e_vf *vf, u8 *msg, u16 msglen,
bool config)
{
- struct i40e_virtchnl_iwarp_qvlist_info *qvlist_info =
- (struct i40e_virtchnl_iwarp_qvlist_info *)msg;
+ struct virtchnl_iwarp_qvlist_info *qvlist_info =
+ (struct virtchnl_iwarp_qvlist_info *)msg;
i40e_status aq_ret = 0;
if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
@@ -2399,8 +2397,8 @@ static int i40e_vc_iwarp_qvmap_msg(struct i40e_vf *vf, u8 *msg, u16 msglen,
error_param:
/* send the response to the VF */
return i40e_vc_send_resp_to_vf(vf,
- config ? I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP :
- I40E_VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP,
+ config ? VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP :
+ VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP,
aq_ret);
}
@@ -2414,8 +2412,8 @@ error_param:
**/
static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
- struct i40e_virtchnl_rss_key *vrk =
- (struct i40e_virtchnl_rss_key *)msg;
+ struct virtchnl_rss_key *vrk =
+ (struct virtchnl_rss_key *)msg;
struct i40e_pf *pf = vf->pf;
struct i40e_vsi *vsi = NULL;
u16 vsi_id = vrk->vsi_id;
@@ -2432,7 +2430,7 @@ static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg, u16 msglen)
aq_ret = i40e_config_rss(vsi, vrk->key, NULL, 0);
err:
/* send the response to the VF */
- return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_RSS_KEY,
+ return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY,
aq_ret);
}
@@ -2446,8 +2444,8 @@ err:
**/
static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
- struct i40e_virtchnl_rss_lut *vrl =
- (struct i40e_virtchnl_rss_lut *)msg;
+ struct virtchnl_rss_lut *vrl =
+ (struct virtchnl_rss_lut *)msg;
struct i40e_pf *pf = vf->pf;
struct i40e_vsi *vsi = NULL;
u16 vsi_id = vrl->vsi_id;
@@ -2464,7 +2462,7 @@ static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg, u16 msglen)
aq_ret = i40e_config_rss(vsi, NULL, vrl->lut, I40E_VF_HLUT_ARRAY_SIZE);
/* send the response to the VF */
err:
- return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_RSS_LUT,
+ return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT,
aq_ret);
}
@@ -2478,7 +2476,7 @@ err:
**/
static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
- struct i40e_virtchnl_rss_hena *vrh = NULL;
+ struct virtchnl_rss_hena *vrh = NULL;
struct i40e_pf *pf = vf->pf;
i40e_status aq_ret = 0;
int len = 0;
@@ -2487,7 +2485,7 @@ static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg, u16 msglen)
aq_ret = I40E_ERR_PARAM;
goto err;
}
- len = sizeof(struct i40e_virtchnl_rss_hena);
+ len = sizeof(struct virtchnl_rss_hena);
vrh = kzalloc(len, GFP_KERNEL);
if (!vrh) {
@@ -2498,7 +2496,7 @@ static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg, u16 msglen)
vrh->hena = i40e_pf_get_default_rss_hena(pf);
err:
/* send the response back to the VF */
- aq_ret = i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS,
+ aq_ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_RSS_HENA_CAPS,
aq_ret, (u8 *)vrh, len);
kfree(vrh);
return aq_ret;
@@ -2514,8 +2512,8 @@ err:
**/
static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
- struct i40e_virtchnl_rss_hena *vrh =
- (struct i40e_virtchnl_rss_hena *)msg;
+ struct virtchnl_rss_hena *vrh =
+ (struct virtchnl_rss_hena *)msg;
struct i40e_pf *pf = vf->pf;
struct i40e_hw *hw = &pf->hw;
i40e_status aq_ret = 0;
@@ -2530,170 +2528,7 @@ static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg, u16 msglen)
/* send the response to the VF */
err:
- return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_SET_RSS_HENA,
- aq_ret);
-}
-
-/**
- * i40e_vc_validate_vf_msg
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- * @msglen: msg length
- * @msghndl: msg handle
- *
- * validate msg
- **/
-static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode,
- u32 v_retval, u8 *msg, u16 msglen)
-{
- bool err_msg_format = false;
- int valid_len = 0;
-
- /* Check if VF is disabled. */
- if (test_bit(I40E_VF_STATE_DISABLED, &vf->vf_states))
- return I40E_ERR_PARAM;
-
- /* Validate message length. */
- switch (v_opcode) {
- case I40E_VIRTCHNL_OP_VERSION:
- valid_len = sizeof(struct i40e_virtchnl_version_info);
- break;
- case I40E_VIRTCHNL_OP_RESET_VF:
- break;
- case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
- if (VF_IS_V11(vf))
- valid_len = sizeof(u32);
- break;
- case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
- valid_len = sizeof(struct i40e_virtchnl_txq_info);
- break;
- case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE:
- valid_len = sizeof(struct i40e_virtchnl_rxq_info);
- break;
- case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
- valid_len = sizeof(struct i40e_virtchnl_vsi_queue_config_info);
- if (msglen >= valid_len) {
- struct i40e_virtchnl_vsi_queue_config_info *vqc =
- (struct i40e_virtchnl_vsi_queue_config_info *)msg;
- valid_len += (vqc->num_queue_pairs *
- sizeof(struct
- i40e_virtchnl_queue_pair_info));
- if (vqc->num_queue_pairs == 0)
- err_msg_format = true;
- }
- break;
- case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
- valid_len = sizeof(struct i40e_virtchnl_irq_map_info);
- if (msglen >= valid_len) {
- struct i40e_virtchnl_irq_map_info *vimi =
- (struct i40e_virtchnl_irq_map_info *)msg;
- valid_len += (vimi->num_vectors *
- sizeof(struct i40e_virtchnl_vector_map));
- if (vimi->num_vectors == 0)
- err_msg_format = true;
- }
- break;
- case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
- case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
- valid_len = sizeof(struct i40e_virtchnl_queue_select);
- break;
- case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
- case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
- valid_len = sizeof(struct i40e_virtchnl_ether_addr_list);
- if (msglen >= valid_len) {
- struct i40e_virtchnl_ether_addr_list *veal =
- (struct i40e_virtchnl_ether_addr_list *)msg;
- valid_len += veal->num_elements *
- sizeof(struct i40e_virtchnl_ether_addr);
- if (veal->num_elements == 0)
- err_msg_format = true;
- }
- break;
- case I40E_VIRTCHNL_OP_ADD_VLAN:
- case I40E_VIRTCHNL_OP_DEL_VLAN:
- valid_len = sizeof(struct i40e_virtchnl_vlan_filter_list);
- if (msglen >= valid_len) {
- struct i40e_virtchnl_vlan_filter_list *vfl =
- (struct i40e_virtchnl_vlan_filter_list *)msg;
- valid_len += vfl->num_elements * sizeof(u16);
- if (vfl->num_elements == 0)
- err_msg_format = true;
- }
- break;
- case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
- valid_len = sizeof(struct i40e_virtchnl_promisc_info);
- break;
- case I40E_VIRTCHNL_OP_GET_STATS:
- valid_len = sizeof(struct i40e_virtchnl_queue_select);
- break;
- case I40E_VIRTCHNL_OP_IWARP:
- /* These messages are opaque to us and will be validated in
- * the RDMA client code. We just need to check for nonzero
- * length. The firmware will enforce max length restrictions.
- */
- if (msglen)
- valid_len = msglen;
- else
- err_msg_format = true;
- break;
- case I40E_VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP:
- valid_len = 0;
- break;
- case I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
- valid_len = sizeof(struct i40e_virtchnl_iwarp_qvlist_info);
- if (msglen >= valid_len) {
- struct i40e_virtchnl_iwarp_qvlist_info *qv =
- (struct i40e_virtchnl_iwarp_qvlist_info *)msg;
- if (qv->num_vectors == 0) {
- err_msg_format = true;
- break;
- }
- valid_len += ((qv->num_vectors - 1) *
- sizeof(struct i40e_virtchnl_iwarp_qv_info));
- }
- break;
- case I40E_VIRTCHNL_OP_CONFIG_RSS_KEY:
- valid_len = sizeof(struct i40e_virtchnl_rss_key);
- if (msglen >= valid_len) {
- struct i40e_virtchnl_rss_key *vrk =
- (struct i40e_virtchnl_rss_key *)msg;
- if (vrk->key_len != I40E_HKEY_ARRAY_SIZE) {
- err_msg_format = true;
- break;
- }
- valid_len += vrk->key_len - 1;
- }
- break;
- case I40E_VIRTCHNL_OP_CONFIG_RSS_LUT:
- valid_len = sizeof(struct i40e_virtchnl_rss_lut);
- if (msglen >= valid_len) {
- struct i40e_virtchnl_rss_lut *vrl =
- (struct i40e_virtchnl_rss_lut *)msg;
- if (vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE) {
- err_msg_format = true;
- break;
- }
- valid_len += vrl->lut_entries - 1;
- }
- break;
- case I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS:
- break;
- case I40E_VIRTCHNL_OP_SET_RSS_HENA:
- valid_len = sizeof(struct i40e_virtchnl_rss_hena);
- break;
- /* These are always errors coming from the VF. */
- case I40E_VIRTCHNL_OP_EVENT:
- case I40E_VIRTCHNL_OP_UNKNOWN:
- default:
- return -EPERM;
- }
- /* few more checks */
- if ((valid_len != msglen) || (err_msg_format)) {
- i40e_vc_send_resp_to_vf(vf, v_opcode, I40E_ERR_PARAM);
- return -EINVAL;
- } else {
- return 0;
- }
+ return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_SET_RSS_HENA, aq_ret);
}
/**
@@ -2719,80 +2554,104 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
if (local_vf_id >= pf->num_alloc_vfs)
return -EINVAL;
vf = &(pf->vf[local_vf_id]);
+
+ /* Check if VF is disabled. */
+ if (test_bit(I40E_VF_STATE_DISABLED, &vf->vf_states))
+ return I40E_ERR_PARAM;
+
/* perform basic checks on the msg */
- ret = i40e_vc_validate_vf_msg(vf, v_opcode, v_retval, msg, msglen);
+ ret = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);
+
+ /* perform additional checks specific to this driver */
+ if (v_opcode == VIRTCHNL_OP_CONFIG_RSS_KEY) {
+ struct virtchnl_rss_key *vrk = (struct virtchnl_rss_key *)msg;
+
+ if (vrk->key_len != I40E_HKEY_ARRAY_SIZE)
+ ret = -EINVAL;
+ } else if (v_opcode == VIRTCHNL_OP_CONFIG_RSS_LUT) {
+ struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg;
+
+ if (vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE)
+ ret = -EINVAL;
+ }
if (ret) {
+ i40e_vc_send_resp_to_vf(vf, v_opcode, I40E_ERR_PARAM);
dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d\n",
local_vf_id, v_opcode, msglen);
- return ret;
+ switch (ret) {
+ case VIRTCHNL_ERR_PARAM:
+ return -EPERM;
+ default:
+ return -EINVAL;
+ }
}
switch (v_opcode) {
- case I40E_VIRTCHNL_OP_VERSION:
+ case VIRTCHNL_OP_VERSION:
ret = i40e_vc_get_version_msg(vf, msg);
break;
- case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
+ case VIRTCHNL_OP_GET_VF_RESOURCES:
ret = i40e_vc_get_vf_resources_msg(vf, msg);
break;
- case I40E_VIRTCHNL_OP_RESET_VF:
+ case VIRTCHNL_OP_RESET_VF:
i40e_vc_reset_vf_msg(vf);
ret = 0;
break;
- case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
+ case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
ret = i40e_vc_config_promiscuous_mode_msg(vf, msg, msglen);
break;
- case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
+ case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
ret = i40e_vc_config_queues_msg(vf, msg, msglen);
break;
- case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
+ case VIRTCHNL_OP_CONFIG_IRQ_MAP:
ret = i40e_vc_config_irq_map_msg(vf, msg, msglen);
break;
- case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
+ case VIRTCHNL_OP_ENABLE_QUEUES:
ret = i40e_vc_enable_queues_msg(vf, msg, msglen);
i40e_vc_notify_vf_link_state(vf);
break;
- case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
+ case VIRTCHNL_OP_DISABLE_QUEUES:
ret = i40e_vc_disable_queues_msg(vf, msg, msglen);
break;
- case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
+ case VIRTCHNL_OP_ADD_ETH_ADDR:
ret = i40e_vc_add_mac_addr_msg(vf, msg, msglen);
break;
- case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
+ case VIRTCHNL_OP_DEL_ETH_ADDR:
ret = i40e_vc_del_mac_addr_msg(vf, msg, msglen);
break;
- case I40E_VIRTCHNL_OP_ADD_VLAN:
+ case VIRTCHNL_OP_ADD_VLAN:
ret = i40e_vc_add_vlan_msg(vf, msg, msglen);
break;
- case I40E_VIRTCHNL_OP_DEL_VLAN:
+ case VIRTCHNL_OP_DEL_VLAN:
ret = i40e_vc_remove_vlan_msg(vf, msg, msglen);
break;
- case I40E_VIRTCHNL_OP_GET_STATS:
+ case VIRTCHNL_OP_GET_STATS:
ret = i40e_vc_get_stats_msg(vf, msg, msglen);
break;
- case I40E_VIRTCHNL_OP_IWARP:
+ case VIRTCHNL_OP_IWARP:
ret = i40e_vc_iwarp_msg(vf, msg, msglen);
break;
- case I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
+ case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
ret = i40e_vc_iwarp_qvmap_msg(vf, msg, msglen, true);
break;
- case I40E_VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP:
+ case VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP:
ret = i40e_vc_iwarp_qvmap_msg(vf, msg, msglen, false);
break;
- case I40E_VIRTCHNL_OP_CONFIG_RSS_KEY:
+ case VIRTCHNL_OP_CONFIG_RSS_KEY:
ret = i40e_vc_config_rss_key(vf, msg, msglen);
break;
- case I40E_VIRTCHNL_OP_CONFIG_RSS_LUT:
+ case VIRTCHNL_OP_CONFIG_RSS_LUT:
ret = i40e_vc_config_rss_lut(vf, msg, msglen);
break;
- case I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS:
+ case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
ret = i40e_vc_get_rss_hena(vf, msg, msglen);
break;
- case I40E_VIRTCHNL_OP_SET_RSS_HENA:
+ case VIRTCHNL_OP_SET_RSS_HENA:
ret = i40e_vc_set_rss_hena(vf, msg, msglen);
break;
- case I40E_VIRTCHNL_OP_UNKNOWN:
+ case VIRTCHNL_OP_UNKNOWN:
default:
dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n",
v_opcode, local_vf_id);
@@ -3220,7 +3079,7 @@ int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_pf *pf = np->vsi->back;
- struct i40e_virtchnl_pf_event pfe;
+ struct virtchnl_pf_event pfe;
struct i40e_hw *hw = &pf->hw;
struct i40e_vf *vf;
int abs_vf_id;
@@ -3236,8 +3095,8 @@ int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
vf = &pf->vf[vf_id];
abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
- pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;
- pfe.severity = I40E_PF_EVENT_SEVERITY_INFO;
+ pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
+ pfe.severity = PF_EVENT_SEVERITY_INFO;
switch (link) {
case IFLA_VF_LINK_STATE_AUTO:
@@ -3245,6 +3104,7 @@ int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
pfe.event_data.link_event.link_status =
pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP;
pfe.event_data.link_event.link_speed =
+ (enum virtchnl_link_speed)
pf->hw.phy.link_info.link_speed;
break;
case IFLA_VF_LINK_STATE_ENABLE:
@@ -3264,7 +3124,7 @@ int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
goto error_out;
}
/* Notify the VF of its new link state */
- i40e_aq_send_msg_to_vf(hw, abs_vf_id, I40E_VIRTCHNL_OP_EVENT,
+ i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
0, (u8 *)&pfe, sizeof(pfe), NULL);
error_out:
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index 20d7c8160e9e..1f4b0c504368 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -40,9 +40,6 @@
#define I40E_VLAN_MASK 0xFFF
#define I40E_PRIORITY_MASK 0x7000
-#define VF_IS_V10(_v) (((_v)->vf_ver.major == 1) && ((_v)->vf_ver.minor == 0))
-#define VF_IS_V11(_v) (((_v)->vf_ver.major == 1) && ((_v)->vf_ver.minor == 1))
-
/* Various queue ctrls */
enum i40e_queue_ctrl {
I40E_QUEUE_CTRL_UNKNOWN = 0,
@@ -81,13 +78,13 @@ struct i40e_vf {
s16 vf_id;
/* all VF vsis connect to the same parent */
enum i40e_switch_element_types parent_type;
- struct i40e_virtchnl_version_info vf_ver;
+ struct virtchnl_version_info vf_ver;
u32 driver_caps; /* reported by VF driver */
/* VF Port Extender (PE) stag if used */
u16 stag;
- struct i40e_virtchnl_ether_addr default_lan_addr;
+ struct virtchnl_ether_addr default_lan_addr;
u16 port_vlan_id;
bool pf_set_mac; /* The VMM admin set the VF MAC address */
bool trusted;
@@ -115,7 +112,7 @@ struct i40e_vf {
u16 num_vlan;
/* RDMA Client */
- struct i40e_virtchnl_iwarp_qvlist_info *qvlist_info;
+ struct virtchnl_iwarp_qvlist_info *qvlist_info;
};
void i40e_free_vfs(struct i40e_pf *pf);
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
index 91d8786d386d..83e63e55c4b4 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
+ * Copyright(c) 2013 - 2017 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -528,7 +528,7 @@ struct i40e_aqc_mac_address_read {
#define I40E_AQC_PORT_ADDR_VALID 0x40
#define I40E_AQC_WOL_ADDR_VALID 0x80
#define I40E_AQC_MC_MAG_EN_VALID 0x100
-#define I40E_AQC_ADDR_VALID_MASK 0x1F0
+#define I40E_AQC_ADDR_VALID_MASK 0x3F0
u8 reserved[6];
__le32 addr_high;
__le32 addr_low;
@@ -586,6 +586,7 @@ struct i40e_aqc_set_wol_filter {
__le16 cmd_flags;
#define I40E_AQC_SET_WOL_FILTER 0x8000
#define I40E_AQC_SET_WOL_FILTER_NO_TCO_WOL 0x4000
+#define I40E_AQC_SET_WOL_FILTER_WOL_PRESERVE_ON_PFR 0x2000
#define I40E_AQC_SET_WOL_FILTER_ACTION_CLEAR 0
#define I40E_AQC_SET_WOL_FILTER_ACTION_SET 1
__le16 valid_flags;
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c
index 43f10761f4ba..1dd1938f594f 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c
@@ -27,7 +27,7 @@
#include "i40e_type.h"
#include "i40e_adminq.h"
#include "i40e_prototype.h"
-#include "i40e_virtchnl.h"
+#include <linux/avf/virtchnl.h>
/**
* i40e_set_mac_type - Sets MAC type
@@ -68,6 +68,7 @@ i40e_status i40e_set_mac_type(struct i40e_hw *hw)
break;
case I40E_DEV_ID_VF:
case I40E_DEV_ID_VF_HV:
+ case I40E_DEV_ID_ADAPTIVE_VF:
hw->mac.type = I40E_MAC_VF;
break;
default:
@@ -1054,7 +1055,7 @@ do_retry:
* completion before returning.
**/
i40e_status i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
- enum i40e_virtchnl_ops v_opcode,
+ enum virtchnl_ops v_opcode,
i40e_status v_retval,
u8 *msg, u16 msglen,
struct i40e_asq_cmd_details *cmd_details)
@@ -1092,9 +1093,9 @@ i40e_status i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
* with appropriate information.
**/
void i40e_vf_parse_hw_config(struct i40e_hw *hw,
- struct i40e_virtchnl_vf_resource *msg)
+ struct virtchnl_vf_resource *msg)
{
- struct i40e_virtchnl_vsi_resource *vsi_res;
+ struct virtchnl_vsi_resource *vsi_res;
int i;
vsi_res = &msg->vsi_res[0];
@@ -1104,11 +1105,10 @@ void i40e_vf_parse_hw_config(struct i40e_hw *hw,
hw->dev_caps.num_tx_qp = msg->num_queue_pairs;
hw->dev_caps.num_msix_vectors_vf = msg->max_vectors;
hw->dev_caps.dcb = msg->vf_offload_flags &
- I40E_VIRTCHNL_VF_OFFLOAD_L2;
- hw->dev_caps.fcoe = (msg->vf_offload_flags &
- I40E_VIRTCHNL_VF_OFFLOAD_FCOE) ? 1 : 0;
+ VIRTCHNL_VF_OFFLOAD_L2;
+ hw->dev_caps.fcoe = 0;
for (i = 0; i < msg->num_vsis; i++) {
- if (vsi_res->vsi_type == I40E_VSI_SRIOV) {
+ if (vsi_res->vsi_type == VIRTCHNL_VSI_SRIOV) {
ether_addr_copy(hw->mac.perm_addr,
vsi_res->default_mac_addr);
ether_addr_copy(hw->mac.addr,
@@ -1128,7 +1128,7 @@ void i40e_vf_parse_hw_config(struct i40e_hw *hw,
**/
i40e_status i40e_vf_reset(struct i40e_hw *hw)
{
- return i40e_aq_send_msg_to_pf(hw, I40E_VIRTCHNL_OP_RESET_VF,
+ return i40e_aq_send_msg_to_pf(hw, VIRTCHNL_OP_RESET_VF,
0, NULL, 0, NULL);
}
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_devids.h b/drivers/net/ethernet/intel/i40evf/i40e_devids.h
index d76393c95056..0469e4bfd3ec 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_devids.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_devids.h
@@ -43,6 +43,7 @@
#define I40E_DEV_ID_25G_SFP28 0x158B
#define I40E_DEV_ID_VF 0x154C
#define I40E_DEV_ID_VF_HV 0x1571
+#define I40E_DEV_ID_ADAPTIVE_VF 0x1889
#define I40E_DEV_ID_SFP_X722 0x37D0
#define I40E_DEV_ID_1G_BASE_T_X722 0x37D1
#define I40E_DEV_ID_10G_BASE_T_X722 0x37D2
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
index 741223d5d809..c9836bba487d 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
@@ -29,7 +29,7 @@
#include "i40e_type.h"
#include "i40e_alloc.h"
-#include "i40e_virtchnl.h"
+#include <linux/avf/virtchnl.h>
/* Prototypes for shared code functions that are not in
* the standard function pointer structures. These are
@@ -87,10 +87,10 @@ static inline struct i40e_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype)
/* i40e_common for VF drivers*/
void i40e_vf_parse_hw_config(struct i40e_hw *hw,
- struct i40e_virtchnl_vf_resource *msg);
+ struct virtchnl_vf_resource *msg);
i40e_status i40e_vf_reset(struct i40e_hw *hw);
i40e_status i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
- enum i40e_virtchnl_ops v_opcode,
+ enum virtchnl_ops v_opcode,
i40e_status v_retval,
u8 *msg, u16 msglen,
struct i40e_asq_cmd_details *cmd_details);
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
deleted file mode 100644
index c5ad0388c3d5..000000000000
--- a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
+++ /dev/null
@@ -1,449 +0,0 @@
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
-
-#ifndef _I40E_VIRTCHNL_H_
-#define _I40E_VIRTCHNL_H_
-
-#include "i40e_type.h"
-
-/* Description:
- * This header file describes the VF-PF communication protocol used
- * by the various i40e drivers.
- *
- * Admin queue buffer usage:
- * desc->opcode is always i40e_aqc_opc_send_msg_to_pf
- * flags, retval, datalen, and data addr are all used normally.
- * Firmware copies the cookie fields when sending messages between the PF and
- * VF, but uses all other fields internally. Due to this limitation, we
- * must send all messages as "indirect", i.e. using an external buffer.
- *
- * All the vsi indexes are relative to the VF. Each VF can have a maximum of
- * three VSIs. All the queue indexes are relative to the VSI. Each VF can
- * have a maximum of sixteen queues for all of its VSIs.
- *
- * The PF is required to return a status code in v_retval for all messages
- * except RESET_VF, which does not require any response. The return value is of
- * i40e_status_code type, defined in i40e_type.h.
- *
- * In general, VF driver initialization should roughly follow the order of these
- * opcodes. The VF driver must first validate the API version of the PF driver,
- * then request a reset, then get resources, then configure queues and
- * interrupts. After these operations are complete, the VF driver may start
- * its queues, optionally add MAC and VLAN filters, and process traffic.
- */
-
-/* Opcodes for VF-PF communication. These are placed in the v_opcode field
- * of the virtchnl_msg structure.
- */
-enum i40e_virtchnl_ops {
-/* The PF sends status change events to VFs using
- * the I40E_VIRTCHNL_OP_EVENT opcode.
- * VFs send requests to the PF using the other ops.
- */
- I40E_VIRTCHNL_OP_UNKNOWN = 0,
- I40E_VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */
- I40E_VIRTCHNL_OP_RESET_VF = 2,
- I40E_VIRTCHNL_OP_GET_VF_RESOURCES = 3,
- I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE = 4,
- I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE = 5,
- I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES = 6,
- I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP = 7,
- I40E_VIRTCHNL_OP_ENABLE_QUEUES = 8,
- I40E_VIRTCHNL_OP_DISABLE_QUEUES = 9,
- I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS = 10,
- I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS = 11,
- I40E_VIRTCHNL_OP_ADD_VLAN = 12,
- I40E_VIRTCHNL_OP_DEL_VLAN = 13,
- I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14,
- I40E_VIRTCHNL_OP_GET_STATS = 15,
- I40E_VIRTCHNL_OP_FCOE = 16,
- I40E_VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */
- I40E_VIRTCHNL_OP_IWARP = 20,
- I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP = 21,
- I40E_VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP = 22,
- I40E_VIRTCHNL_OP_CONFIG_RSS_KEY = 23,
- I40E_VIRTCHNL_OP_CONFIG_RSS_LUT = 24,
- I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25,
- I40E_VIRTCHNL_OP_SET_RSS_HENA = 26,
-
-};
-
-/* Virtual channel message descriptor. This overlays the admin queue
- * descriptor. All other data is passed in external buffers.
- */
-
-struct i40e_virtchnl_msg {
- u8 pad[8]; /* AQ flags/opcode/len/retval fields */
- enum i40e_virtchnl_ops v_opcode; /* avoid confusion with desc->opcode */
- i40e_status v_retval; /* ditto for desc->retval */
- u32 vfid; /* used by PF when sending to VF */
-};
-
-/* Message descriptions and data structures.*/
-
-/* I40E_VIRTCHNL_OP_VERSION
- * VF posts its version number to the PF. PF responds with its version number
- * in the same format, along with a return code.
- * Reply from PF has its major/minor versions also in param0 and param1.
- * If there is a major version mismatch, then the VF cannot operate.
- * If there is a minor version mismatch, then the VF can operate but should
- * add a warning to the system log.
- *
- * This enum element MUST always be specified as == 1, regardless of other
- * changes in the API. The PF must always respond to this message without
- * error regardless of version mismatch.
- */
-#define I40E_VIRTCHNL_VERSION_MAJOR 1
-#define I40E_VIRTCHNL_VERSION_MINOR 1
-#define I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS 0
-
-struct i40e_virtchnl_version_info {
- u32 major;
- u32 minor;
-};
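
A hedged sketch of the VF side of this negotiation, following the major/minor rules stated above (the helper name is illustrative):

/* Sketch: vet the PF's VERSION reply. */
static int check_pf_version(const struct i40e_virtchnl_version_info *pf_ver)
{
	if (pf_ver->major != I40E_VIRTCHNL_VERSION_MAJOR)
		return -EOPNOTSUPP;	/* major mismatch: VF cannot operate */
	if (pf_ver->minor != I40E_VIRTCHNL_VERSION_MINOR)
		pr_warn("PF/VF minor version mismatch, continuing\n");
	return 0;
}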
-
-/* I40E_VIRTCHNL_OP_RESET_VF
- * VF sends this request to PF with no parameters
- * PF does NOT respond! VF driver must delay then poll VFGEN_RSTAT register
- * until reset completion is indicated. The admin queue must be reinitialized
- * after this operation.
- *
- * When reset is complete, PF must ensure that all queues in all VSIs associated
- * with the VF are stopped, all queue configurations in the HMC are set to 0,
- * and all MAC and VLAN filters (except the default MAC address) on all VSIs
- * are cleared.
- */
-
-/* I40E_VIRTCHNL_OP_GET_VF_RESOURCES
- * Version 1.0 VF sends this request to PF with no parameters
- * Version 1.1 VF sends this request to PF with u32 bitmap of its capabilities
- * PF responds with an indirect message containing
- * i40e_virtchnl_vf_resource and one or more
- * i40e_virtchnl_vsi_resource structures.
- */
-
-struct i40e_virtchnl_vsi_resource {
- u16 vsi_id;
- u16 num_queue_pairs;
- enum i40e_vsi_type vsi_type;
- u16 qset_handle;
- u8 default_mac_addr[ETH_ALEN];
-};
-/* VF offload flags */
-#define I40E_VIRTCHNL_VF_OFFLOAD_L2 0x00000001
-#define I40E_VIRTCHNL_VF_OFFLOAD_IWARP 0x00000002
-#define I40E_VIRTCHNL_VF_OFFLOAD_FCOE 0x00000004
-#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ 0x00000008
-#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG 0x00000010
-#define I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR 0x00000020
-#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000
-#define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000
-#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000
-#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF 0X00080000
-#define I40E_VIRTCHNL_VF_OFFLOAD_ENCAP 0X00100000
-#define I40E_VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM 0X00200000
-
-#define I40E_VF_BASE_MODE_OFFLOADS (I40E_VIRTCHNL_VF_OFFLOAD_L2 | \
- I40E_VIRTCHNL_VF_OFFLOAD_VLAN | \
- I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF)
-
-struct i40e_virtchnl_vf_resource {
- u16 num_vsis;
- u16 num_queue_pairs;
- u16 max_vectors;
- u16 max_mtu;
-
- u32 vf_offload_flags;
- u32 rss_key_size;
- u32 rss_lut_size;
-
- struct i40e_virtchnl_vsi_resource vsi_res[1];
-};
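
Per the comment above, a 1.1 VF passes its capability bitmap as the payload of GET_VF_RESOURCES; a minimal sketch (the flag selection is illustrative):

/* Sketch: 1.1 VF advertising L2, VLAN, and PF-driven RSS support. */
u32 caps = I40E_VIRTCHNL_VF_OFFLOAD_L2 |
	   I40E_VIRTCHNL_VF_OFFLOAD_VLAN |
	   I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF;

i40e_aq_send_msg_to_pf(hw, I40E_VIRTCHNL_OP_GET_VF_RESOURCES, 0,
		       (u8 *)&caps, sizeof(caps), NULL);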
-
-/* I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE
- * VF sends this message to set up parameters for one TX queue.
- * External data buffer contains one instance of i40e_virtchnl_txq_info.
- * PF configures requested queue and returns a status code.
- */
-
-/* Tx queue config info */
-struct i40e_virtchnl_txq_info {
- u16 vsi_id;
- u16 queue_id;
- u16 ring_len; /* number of descriptors, multiple of 8 */
- u16 headwb_enabled;
- u64 dma_ring_addr;
- u64 dma_headwb_addr;
-};
-
-/* I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE
- * VF sends this message to set up parameters for one RX queue.
- * External data buffer contains one instance of i40e_virtchnl_rxq_info.
- * PF configures requested queue and returns a status code.
- */
-
-/* Rx queue config info */
-struct i40e_virtchnl_rxq_info {
- u16 vsi_id;
- u16 queue_id;
- u32 ring_len; /* number of descriptors, multiple of 32 */
- u16 hdr_size;
- u16 splithdr_enabled;
- u32 databuffer_size;
- u32 max_pkt_size;
- u64 dma_ring_addr;
- enum i40e_hmc_obj_rx_hsplit_0 rx_split_pos;
-};
-
-/* I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES
- * VF sends this message to set parameters for all active TX and RX queues
- * associated with the specified VSI.
- * PF configures queues and returns status.
- * If the number of queues specified is greater than the number of queues
- * associated with the VSI, an error is returned and no queues are configured.
- */
-struct i40e_virtchnl_queue_pair_info {
- /* NOTE: vsi_id and queue_id should be identical for both queues. */
- struct i40e_virtchnl_txq_info txq;
- struct i40e_virtchnl_rxq_info rxq;
-};
-
-struct i40e_virtchnl_vsi_queue_config_info {
- u16 vsi_id;
- u16 num_queue_pairs;
- struct i40e_virtchnl_queue_pair_info qpair[1];
-};
-
-/* I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP
- * VF uses this message to map vectors to queues.
- * The rxq_map and txq_map fields are bitmaps used to indicate which queues
- * are to be associated with the specified vector.
- * The "other" causes are always mapped to vector 0.
- * PF configures interrupt mapping and returns status.
- */
-struct i40e_virtchnl_vector_map {
- u16 vsi_id;
- u16 vector_id;
- u16 rxq_map;
- u16 txq_map;
- u16 rxitr_idx;
- u16 txitr_idx;
-};
-
-struct i40e_virtchnl_irq_map_info {
- u16 num_vectors;
- struct i40e_virtchnl_vector_map vecmap[1];
-};
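
Each vecmap entry pairs one MSI-X vector with queue bitmaps; a single-vector sketch mapping queue pair 0 to vector 1 follows (vsi_id is assumed to come from GET_VF_RESOURCES):

/* Sketch: map rx/tx queue 0 to vector 1; vector 0 keeps "other" causes. */
struct i40e_virtchnl_irq_map_info irqmap = {0};

irqmap.num_vectors = 1;
irqmap.vecmap[0].vsi_id = vsi_id;
irqmap.vecmap[0].vector_id = 1;
irqmap.vecmap[0].rxq_map = BIT(0);
irqmap.vecmap[0].txq_map = BIT(0);
irqmap.vecmap[0].rxitr_idx = 0;		/* ITR register indices */
irqmap.vecmap[0].txitr_idx = 1;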
-
-/* I40E_VIRTCHNL_OP_ENABLE_QUEUES
- * I40E_VIRTCHNL_OP_DISABLE_QUEUES
- * VF sends these messages to enable or disable TX/RX queue pairs.
- * The queues fields are bitmaps indicating which queues to act upon.
- * (Currently, we only support 16 queues per VF, but we make the field
- * u32 to allow for expansion.)
- * PF performs requested action and returns status.
- */
-struct i40e_virtchnl_queue_select {
- u16 vsi_id;
- u16 pad;
- u32 rx_queues;
- u32 tx_queues;
-};
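
The rx_queues/tx_queues fields are bitmaps of queues within the VSI; enabling the first two queue pairs might look like this sketch (vsi_id again assumed from GET_VF_RESOURCES):

/* Sketch: enable rx/tx queues 0 and 1 on one VSI. */
struct i40e_virtchnl_queue_select vqs = {0};

vqs.vsi_id = vsi_id;
vqs.rx_queues = BIT(0) | BIT(1);
vqs.tx_queues = BIT(0) | BIT(1);
i40e_aq_send_msg_to_pf(hw, I40E_VIRTCHNL_OP_ENABLE_QUEUES, 0,
		       (u8 *)&vqs, sizeof(vqs), NULL);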
-
-/* I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS
- * VF sends this message in order to add one or more unicast or multicast
- * address filters for the specified VSI.
- * PF adds the filters and returns status.
- */
-
-/* I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS
- * VF sends this message in order to remove one or more unicast or multicast
- * filters for the specified VSI.
- * PF removes the filters and returns status.
- */
-
-struct i40e_virtchnl_ether_addr {
- u8 addr[ETH_ALEN];
- u8 pad[2];
-};
-
-struct i40e_virtchnl_ether_addr_list {
- u16 vsi_id;
- u16 num_elements;
- struct i40e_virtchnl_ether_addr list[1];
-};
-
-/* I40E_VIRTCHNL_OP_ADD_VLAN
- * VF sends this message to add one or more VLAN tag filters for receives.
- * PF adds the filters and returns status.
- * If a port VLAN is configured by the PF, this operation will return an
- * error to the VF.
- */
-
-/* I40E_VIRTCHNL_OP_DEL_VLAN
- * VF sends this message to remove one or more VLAN tag filters for receives.
- * PF removes the filters and returns status.
- * If a port VLAN is configured by the PF, this operation will return an
- * error to the VF.
- */
-
-struct i40e_virtchnl_vlan_filter_list {
- u16 vsi_id;
- u16 num_elements;
- u16 vlan_id[1];
-};
-
-/* I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE
- * VF sends VSI id and flags.
- * PF returns status code in retval.
- * Note: we assume that broadcast accept mode is always enabled.
- */
-struct i40e_virtchnl_promisc_info {
- u16 vsi_id;
- u16 flags;
-};
-
-#define I40E_FLAG_VF_UNICAST_PROMISC 0x00000001
-#define I40E_FLAG_VF_MULTICAST_PROMISC 0x00000002
-
-/* I40E_VIRTCHNL_OP_GET_STATS
- * VF sends this message to request stats for the selected VSI. VF uses
- * the i40e_virtchnl_queue_select struct to specify the VSI. The queue_id
- * field is ignored by the PF.
- *
- * PF replies with struct i40e_eth_stats in an external buffer.
- */
-
-/* I40E_VIRTCHNL_OP_CONFIG_RSS_KEY
- * I40E_VIRTCHNL_OP_CONFIG_RSS_LUT
- * VF sends these messages to configure RSS. Only supported if both PF
- * and VF drivers set the I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF bit during
- * configuration negotiation. If this is the case, then the RSS fields in
- * the VF resource struct are valid.
- * Both the key and LUT are initialized to 0 by the PF, meaning that
- * RSS is effectively disabled until set up by the VF.
- */
-struct i40e_virtchnl_rss_key {
- u16 vsi_id;
- u16 key_len;
- u8 key[1]; /* RSS hash key, packed bytes */
-};
-
-struct i40e_virtchnl_rss_lut {
- u16 vsi_id;
- u16 lut_entries;
- u8 lut[1]; /* RSS lookup table */
-};
-
-/* I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS
- * I40E_VIRTCHNL_OP_SET_RSS_HENA
- * VF sends these messages to get and set the hash filter enable bits for RSS.
- * By default, the PF sets these to all possible traffic types that the
- * hardware supports. The VF can query this value if it wants to change the
- * traffic types that are hashed by the hardware.
- * Traffic types are defined in the i40e_filter_pctype enum in i40e_type.h
- */
-struct i40e_virtchnl_rss_hena {
- u64 hena;
-};
-
-/* I40E_VIRTCHNL_OP_EVENT
- * PF sends this message to inform the VF driver of events that may affect it.
- * No direct response is expected from the VF, though it may generate other
- * messages in response to this one.
- */
-enum i40e_virtchnl_event_codes {
- I40E_VIRTCHNL_EVENT_UNKNOWN = 0,
- I40E_VIRTCHNL_EVENT_LINK_CHANGE,
- I40E_VIRTCHNL_EVENT_RESET_IMPENDING,
- I40E_VIRTCHNL_EVENT_PF_DRIVER_CLOSE,
-};
-#define I40E_PF_EVENT_SEVERITY_INFO 0
-#define I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM 255
-
-struct i40e_virtchnl_pf_event {
- enum i40e_virtchnl_event_codes event;
- union {
- struct {
- enum i40e_aq_link_speed link_speed;
- bool link_status;
- } link_event;
- } event_data;
-
- int severity;
-};
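
On the VF side, handling reduces to a switch on the event code; a sketch follows (the real handler is i40evf_virtchnl_completion, and the helper name here is hypothetical):

/* Sketch: minimal VF-side dispatch of a PF event. */
static void handle_pf_event(struct i40evf_adapter *adapter,
			    struct i40e_virtchnl_pf_event *pfe)
{
	switch (pfe->event) {
	case I40E_VIRTCHNL_EVENT_LINK_CHANGE:
		adapter->link_up = pfe->event_data.link_event.link_status;
		adapter->link_speed = pfe->event_data.link_event.link_speed;
		break;
	case I40E_VIRTCHNL_EVENT_RESET_IMPENDING:
		/* schedule the driver's reset task */
		break;
	default:
		break;
	}
}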
-
-/* I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP
- * VF uses this message to request PF to map IWARP vectors to IWARP queues.
- * The request for this originates from the VF IWARP driver through
- * a client interface between VF LAN and VF IWARP driver.
- * A vector could have both an AEQ and a CEQ attached to it. There is,
- * however, a single AEQ per VF IWARP instance, so most vectors will
- * have an INVALID_IDX for the aeq and a valid idx for the ceq.
- * There will never be a case where there will be multiple CEQs attached
- * to a single vector.
- * PF configures interrupt mapping and returns status.
- */
-
-/* HW does not define a type value for AEQ; only for RX/TX and CEQ.
- * In order for us to keep the interface simple, SW will define a
- * unique type value for AEQ.
- */
-#define I40E_QUEUE_TYPE_PE_AEQ 0x80
-#define I40E_QUEUE_INVALID_IDX 0xFFFF
-
-struct i40e_virtchnl_iwarp_qv_info {
- u32 v_idx; /* msix_vector */
- u16 ceq_idx;
- u16 aeq_idx;
- u8 itr_idx;
-};
-
-struct i40e_virtchnl_iwarp_qvlist_info {
- u32 num_vectors;
- struct i40e_virtchnl_iwarp_qv_info qv_info[1];
-};
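
Building a one-vector list with only a CEQ attached, per the AEQ/CEQ comment earlier, is a matter of using the invalid index for the aeq slot; a sketch:

/* Sketch: one vector carrying CEQ 0 and no AEQ. */
struct i40e_virtchnl_iwarp_qvlist_info qvlist = {0};

qvlist.num_vectors = 1;
qvlist.qv_info[0].v_idx = 1;				/* MSI-X vector */
qvlist.qv_info[0].ceq_idx = 0;				/* CEQ 0 rides this vector */
qvlist.qv_info[0].aeq_idx = I40E_QUEUE_INVALID_IDX;	/* no AEQ here */
qvlist.qv_info[0].itr_idx = 0;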
-
-/* VF reset states - these are written into the RSTAT register:
- * I40E_VFGEN_RSTAT1 on the PF
- * I40E_VFGEN_RSTAT on the VF
- * When the PF initiates a reset, it writes 0
- * When the reset is complete, it writes 1
- * When the PF detects that the VF has recovered, it writes 2
- * VF checks this register periodically to determine if a reset has occurred,
- * then polls it to know when the reset is complete.
- * If either the PF or VF reads the register while the hardware
- * is in a reset state, it will return DEADBEEF, which, when masked,
- * will result in 3.
- */
-enum i40e_vfr_states {
- I40E_VFR_INPROGRESS = 0,
- I40E_VFR_COMPLETED,
- I40E_VFR_VFACTIVE,
- I40E_VFR_UNKNOWN,
-};
-
-#endif /* _I40E_VIRTCHNL_H_ */
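
A quick check of the arithmetic in that comment — assuming the VFR state field is the low two bits (mask 0x3), a mid-reset read of 0xDEADBEEF masks down to 3, i.e. I40E_VFR_UNKNOWN:

#include <stdio.h>

int main(void)
{
	unsigned int reg = 0xDEADBEEF;		/* value read while in reset */
	unsigned int vfr_state_mask = 0x3;	/* assumed 2-bit state field */

	printf("masked state = %u\n", reg & vfr_state_mask);	/* prints 3 */
	return 0;
}
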
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h
index b8ada6d8d890..6cc92089fecb 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf.h
+++ b/drivers/net/ethernet/intel/i40evf/i40evf.h
@@ -43,7 +43,7 @@
#include <net/udp.h>
#include "i40e_type.h"
-#include "i40e_virtchnl.h"
+#include <linux/avf/virtchnl.h>
#include "i40e_txrx.h"
#define DEFAULT_DEBUG_LEVEL_SHIFT 3
@@ -263,26 +263,26 @@ struct i40evf_adapter {
struct work_struct watchdog_task;
bool netdev_registered;
bool link_up;
- enum i40e_aq_link_speed link_speed;
- enum i40e_virtchnl_ops current_op;
+ enum virtchnl_link_speed link_speed;
+ enum virtchnl_ops current_op;
#define CLIENT_ALLOWED(_a) ((_a)->vf_res ? \
(_a)->vf_res->vf_offload_flags & \
- I40E_VIRTCHNL_VF_OFFLOAD_IWARP : \
+ VIRTCHNL_VF_OFFLOAD_IWARP : \
0)
#define CLIENT_ENABLED(_a) ((_a)->cinst)
/* RSS by the PF should be preferred over RSS via other methods. */
#define RSS_PF(_a) ((_a)->vf_res->vf_offload_flags & \
- I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF)
+ VIRTCHNL_VF_OFFLOAD_RSS_PF)
#define RSS_AQ(_a) ((_a)->vf_res->vf_offload_flags & \
- I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ)
+ VIRTCHNL_VF_OFFLOAD_RSS_AQ)
#define RSS_REG(_a) (!((_a)->vf_res->vf_offload_flags & \
- (I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ | \
- I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF)))
+ (VIRTCHNL_VF_OFFLOAD_RSS_AQ | \
+ VIRTCHNL_VF_OFFLOAD_RSS_PF)))
#define VLAN_ALLOWED(_a) ((_a)->vf_res->vf_offload_flags & \
- I40E_VIRTCHNL_VF_OFFLOAD_VLAN)
- struct i40e_virtchnl_vf_resource *vf_res; /* incl. all VSIs */
- struct i40e_virtchnl_vsi_resource *vsi_res; /* our LAN VSI */
- struct i40e_virtchnl_version_info pf_version;
+ VIRTCHNL_VF_OFFLOAD_VLAN)
+ struct virtchnl_vf_resource *vf_res; /* incl. all VSIs */
+ struct virtchnl_vsi_resource *vsi_res; /* our LAN VSI */
+ struct virtchnl_version_info pf_version;
#define PF_IS_V11(_a) (((_a)->pf_version.major == 1) && \
((_a)->pf_version.minor == 1))
u16 msg_enable;
@@ -348,7 +348,7 @@ void i40evf_set_hena(struct i40evf_adapter *adapter);
void i40evf_set_rss_key(struct i40evf_adapter *adapter);
void i40evf_set_rss_lut(struct i40evf_adapter *adapter);
void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
- enum i40e_virtchnl_ops v_opcode,
+ enum virtchnl_ops v_opcode,
i40e_status v_retval, u8 *msg, u16 msglen);
int i40evf_config_rss(struct i40evf_adapter *adapter);
int i40evf_lan_add_device(struct i40evf_adapter *adapter);
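
The RSS_PF/RSS_AQ/RSS_REG macros above encode a preference order: PF-based RSS first, admin-queue second, direct register access only as the fallback. A standalone sketch of that selection, with made-up flag values (the real bit assignments live in the virtchnl header):

#include <stdio.h>

#define OFFLOAD_RSS_AQ	0x0008	/* hypothetical bit values */
#define OFFLOAD_RSS_PF	0x0080

static const char *rss_method(unsigned int offload_flags)
{
	if (offload_flags & OFFLOAD_RSS_PF)
		return "PF";		/* preferred */
	if (offload_flags & OFFLOAD_RSS_AQ)
		return "AQ";
	return "REG";			/* neither bit set */
}

int main(void)
{
	printf("%s\n", rss_method(OFFLOAD_RSS_AQ | OFFLOAD_RSS_PF)); /* PF */
	return 0;
}
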
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_client.c b/drivers/net/ethernet/intel/i40evf/i40evf_client.c
index ee737680a0e9..93cf5fd17d91 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_client.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_client.c
@@ -120,7 +120,7 @@ static int i40evf_client_release_qvlist(struct i40e_info *ldev)
return -EAGAIN;
err = i40e_aq_send_msg_to_pf(&adapter->hw,
- I40E_VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP,
+ VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP,
I40E_SUCCESS, NULL, 0, NULL);
if (err)
@@ -410,7 +410,7 @@ static u32 i40evf_client_virtchnl_send(struct i40e_info *ldev,
if (adapter->aq_required)
return -EAGAIN;
- err = i40e_aq_send_msg_to_pf(&adapter->hw, I40E_VIRTCHNL_OP_IWARP,
+ err = i40e_aq_send_msg_to_pf(&adapter->hw, VIRTCHNL_OP_IWARP,
I40E_SUCCESS, msg, len, NULL);
if (err)
dev_err(&adapter->pdev->dev, "Unable to send iWarp message to PF, error %d, aq status %d\n",
@@ -431,7 +431,7 @@ static int i40evf_client_setup_qvlist(struct i40e_info *ldev,
struct i40e_client *client,
struct i40e_qvlist_info *qvlist_info)
{
- struct i40e_virtchnl_iwarp_qvlist_info *v_qvlist_info;
+ struct virtchnl_iwarp_qvlist_info *v_qvlist_info;
struct i40evf_adapter *adapter = ldev->vf;
struct i40e_qv_info *qv_info;
i40e_status err;
@@ -453,14 +453,14 @@ static int i40evf_client_setup_qvlist(struct i40e_info *ldev,
return -EINVAL;
}
- v_qvlist_info = (struct i40e_virtchnl_iwarp_qvlist_info *)qvlist_info;
- msg_size = sizeof(struct i40e_virtchnl_iwarp_qvlist_info) +
- (sizeof(struct i40e_virtchnl_iwarp_qv_info) *
+ v_qvlist_info = (struct virtchnl_iwarp_qvlist_info *)qvlist_info;
+ msg_size = sizeof(struct virtchnl_iwarp_qvlist_info) +
+ (sizeof(struct virtchnl_iwarp_qv_info) *
(v_qvlist_info->num_vectors - 1));
- adapter->client_pending |= BIT(I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP);
+ adapter->client_pending |= BIT(VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP);
err = i40e_aq_send_msg_to_pf(&adapter->hw,
- I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP,
+ VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP,
I40E_SUCCESS, (u8 *)v_qvlist_info, msg_size, NULL);
if (err) {
@@ -474,7 +474,7 @@ static int i40evf_client_setup_qvlist(struct i40e_info *ldev,
for (i = 0; i < 5; i++) {
msleep(100);
if (!(adapter->client_pending &
- BIT(I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP))) {
+ BIT(VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP))) {
err = 0;
break;
}
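
The setup path above fires the IRQ-map request and then polls, in 100 ms steps, for the completion handler to clear the pending bit. A userspace-flavored sketch of that bounded wait (names and the sleep primitive are illustrative, not the driver's):

#include <stdio.h>
#include <time.h>

#define BIT(n) (1UL << (n))

static int wait_for_op_complete(volatile unsigned long *pending,
				unsigned int op)
{
	struct timespec delay = { 0, 100 * 1000 * 1000 };	/* ~100 ms */
	int i;

	for (i = 0; i < 5; i++) {
		nanosleep(&delay, NULL);
		if (!(*pending & BIT(op)))
			return 0;	/* other context cleared the bit */
	}
	return -1;			/* still pending after ~500 ms */
}

int main(void)
{
	volatile unsigned long pending = 0;	/* already clear: succeeds */

	printf("%d\n", wait_for_op_complete(&pending, 3));
	return 0;
}
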
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index ea110a730e16..7c213a347909 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -44,9 +44,9 @@ static const char i40evf_driver_string[] =
#define DRV_KERN "-k"
-#define DRV_VERSION_MAJOR 2
-#define DRV_VERSION_MINOR 1
-#define DRV_VERSION_BUILD 14
+#define DRV_VERSION_MAJOR 3
+#define DRV_VERSION_MINOR 0
+#define DRV_VERSION_BUILD 0
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) \
@@ -67,6 +67,7 @@ static const struct pci_device_id i40evf_pci_tbl[] = {
{PCI_VDEVICE(INTEL, I40E_DEV_ID_VF), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_VF_HV), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_X722_VF), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_ADAPTIVE_VF), 0},
/* required last entry */
{0, }
};
@@ -1131,7 +1132,7 @@ void i40evf_down(struct i40evf_adapter *adapter)
if (!(adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) &&
adapter->state != __I40EVF_RESETTING) {
/* cancel any current operation */
- adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
+ adapter->current_op = VIRTCHNL_OP_UNKNOWN;
/* Schedule operations to close down the HW. Don't wait
* here for this to complete. The watchdog is still running
* and it will take care of this.
@@ -1197,6 +1198,7 @@ static void i40evf_free_queues(struct i40evf_adapter *adapter)
{
if (!adapter->vsi_res)
return;
+ adapter->num_active_queues = 0;
kfree(adapter->tx_rings);
adapter->tx_rings = NULL;
kfree(adapter->rx_rings);
@@ -1213,18 +1215,22 @@ static void i40evf_free_queues(struct i40evf_adapter *adapter)
**/
static int i40evf_alloc_queues(struct i40evf_adapter *adapter)
{
- int i;
+ int i, num_active_queues;
+
+ num_active_queues = min_t(int,
+ adapter->vsi_res->num_queue_pairs,
+ (int)(num_online_cpus()));
- adapter->tx_rings = kcalloc(adapter->num_active_queues,
+ adapter->tx_rings = kcalloc(num_active_queues,
sizeof(struct i40e_ring), GFP_KERNEL);
if (!adapter->tx_rings)
goto err_out;
- adapter->rx_rings = kcalloc(adapter->num_active_queues,
+ adapter->rx_rings = kcalloc(num_active_queues,
sizeof(struct i40e_ring), GFP_KERNEL);
if (!adapter->rx_rings)
goto err_out;
- for (i = 0; i < adapter->num_active_queues; i++) {
+ for (i = 0; i < num_active_queues; i++) {
struct i40e_ring *tx_ring;
struct i40e_ring *rx_ring;
@@ -1246,6 +1252,8 @@ static int i40evf_alloc_queues(struct i40evf_adapter *adapter)
rx_ring->rx_itr_setting = (I40E_ITR_DYNAMIC | I40E_ITR_RX_DEF);
}
+ adapter->num_active_queues = num_active_queues;
+
return 0;
err_out:
@@ -1311,7 +1319,7 @@ static int i40evf_config_rss_aq(struct i40evf_adapter *adapter)
struct i40e_hw *hw = &adapter->hw;
int ret = 0;
- if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
dev_err(&adapter->pdev->dev, "Cannot configure RSS, command %d pending\n",
adapter->current_op);
@@ -1410,7 +1418,7 @@ static int i40evf_init_rss(struct i40evf_adapter *adapter)
if (!RSS_PF(adapter)) {
/* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */
if (adapter->vf_res->vf_offload_flags &
- I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
+ VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
adapter->hena = I40E_DEFAULT_RSS_HENA_EXPANDED;
else
adapter->hena = I40E_DEFAULT_RSS_HENA;
@@ -1588,8 +1596,8 @@ static void i40evf_watchdog_task(struct work_struct *work)
if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) {
reg_val = rd32(hw, I40E_VFGEN_RSTAT) &
I40E_VFGEN_RSTAT_VFR_STATE_MASK;
- if ((reg_val == I40E_VFR_VFACTIVE) ||
- (reg_val == I40E_VFR_COMPLETED)) {
+ if ((reg_val == VIRTCHNL_VFR_VFACTIVE) ||
+ (reg_val == VIRTCHNL_VFR_COMPLETED)) {
/* A chance for redemption! */
dev_err(&adapter->pdev->dev, "Hardware came out of reset. Attempting reinit.\n");
adapter->state = __I40EVF_STARTUP;
@@ -1605,7 +1613,7 @@ static void i40evf_watchdog_task(struct work_struct *work)
return;
}
adapter->aq_required = 0;
- adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
+ adapter->current_op = VIRTCHNL_OP_UNKNOWN;
goto watchdog_done;
}
@@ -1621,7 +1629,7 @@ static void i40evf_watchdog_task(struct work_struct *work)
dev_err(&adapter->pdev->dev, "Hardware reset detected\n");
schedule_work(&adapter->reset_task);
adapter->aq_required = 0;
- adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
+ adapter->current_op = VIRTCHNL_OP_UNKNOWN;
goto watchdog_done;
}
@@ -1707,13 +1715,13 @@ static void i40evf_watchdog_task(struct work_struct *work)
}
if (adapter->aq_required & I40EVF_FLAG_AQ_REQUEST_PROMISC) {
- i40evf_set_promiscuous(adapter, I40E_FLAG_VF_UNICAST_PROMISC |
- I40E_FLAG_VF_MULTICAST_PROMISC);
+ i40evf_set_promiscuous(adapter, FLAG_VF_UNICAST_PROMISC |
+ FLAG_VF_MULTICAST_PROMISC);
goto watchdog_done;
}
if (adapter->aq_required & I40EVF_FLAG_AQ_REQUEST_ALLMULTI) {
- i40evf_set_promiscuous(adapter, I40E_FLAG_VF_MULTICAST_PROMISC);
+ i40evf_set_promiscuous(adapter, FLAG_VF_MULTICAST_PROMISC);
goto watchdog_done;
}
@@ -1854,7 +1862,7 @@ static void i40evf_reset_task(struct work_struct *work)
reg_val = rd32(hw, I40E_VFGEN_RSTAT) &
I40E_VFGEN_RSTAT_VFR_STATE_MASK;
- if (reg_val == I40E_VFR_VFACTIVE)
+ if (reg_val == VIRTCHNL_VFR_VFACTIVE)
break;
}
@@ -1888,7 +1896,7 @@ continue_reset:
/* kill and reinit the admin queue */
i40evf_shutdown_adminq(hw);
- adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
+ adapter->current_op = VIRTCHNL_OP_UNKNOWN;
err = i40evf_init_adminq(hw);
if (err)
dev_info(&adapter->pdev->dev, "Failed to init adminq: %d\n",
@@ -1949,7 +1957,7 @@ static void i40evf_adminq_task(struct work_struct *work)
container_of(work, struct i40evf_adapter, adminq_task);
struct i40e_hw *hw = &adapter->hw;
struct i40e_arq_event_info event;
- struct i40e_virtchnl_msg *v_msg;
+ struct virtchnl_msg *v_msg;
i40e_status ret;
u32 val, oldval;
u16 pending;
@@ -1962,14 +1970,15 @@ static void i40evf_adminq_task(struct work_struct *work)
if (!event.msg_buf)
goto out;
- v_msg = (struct i40e_virtchnl_msg *)&event.desc;
+ v_msg = (struct virtchnl_msg *)&event.desc;
do {
ret = i40evf_clean_arq_element(hw, &event, &pending);
if (ret || !v_msg->v_opcode)
break; /* No event to process or error cleaning ARQ */
i40evf_virtchnl_completion(adapter, v_msg->v_opcode,
- v_msg->v_retval, event.msg_buf,
+ (i40e_status)v_msg->v_retval,
+ event.msg_buf,
event.msg_len);
if (pending != 0)
memset(event.msg_buf, 0, I40EVF_MAX_AQ_BUF_SIZE);
@@ -2347,7 +2356,7 @@ static netdev_features_t i40evf_fix_features(struct net_device *netdev,
struct i40evf_adapter *adapter = netdev_priv(netdev);
features &= ~I40EVF_VLAN_FEATURES;
- if (adapter->vf_res->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_VLAN)
+ if (adapter->vf_res->vf_offload_flags & VIRTCHNL_VF_OFFLOAD_VLAN)
features |= I40EVF_VLAN_FEATURES;
return features;
}
@@ -2384,8 +2393,8 @@ static int i40evf_check_reset_complete(struct i40e_hw *hw)
for (i = 0; i < 100; i++) {
rstat = rd32(hw, I40E_VFGEN_RSTAT) &
I40E_VFGEN_RSTAT_VFR_STATE_MASK;
- if ((rstat == I40E_VFR_VFACTIVE) ||
- (rstat == I40E_VFR_COMPLETED))
+ if ((rstat == VIRTCHNL_VFR_VFACTIVE) ||
+ (rstat == VIRTCHNL_VFR_COMPLETED))
return 0;
usleep_range(10, 20);
}
@@ -2401,7 +2410,7 @@ static int i40evf_check_reset_complete(struct i40e_hw *hw)
**/
int i40evf_process_config(struct i40evf_adapter *adapter)
{
- struct i40e_virtchnl_vf_resource *vfres = adapter->vf_res;
+ struct virtchnl_vf_resource *vfres = adapter->vf_res;
struct net_device *netdev = adapter->netdev;
struct i40e_vsi *vsi = &adapter->vsi;
int i;
@@ -2410,7 +2419,7 @@ int i40evf_process_config(struct i40evf_adapter *adapter)
/* got VF config message back from PF, now we can parse it */
for (i = 0; i < vfres->num_vsis; i++) {
- if (vfres->vsi_res[i].vsi_type == I40E_VSI_SRIOV)
+ if (vfres->vsi_res[i].vsi_type == VIRTCHNL_VSI_SRIOV)
adapter->vsi_res = &vfres->vsi_res[i];
}
if (!adapter->vsi_res) {
@@ -2434,7 +2443,7 @@ int i40evf_process_config(struct i40evf_adapter *adapter)
/* advertise to stack only if offloads for encapsulated packets is
* supported
*/
- if (vfres->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_ENCAP) {
+ if (vfres->vf_offload_flags & VIRTCHNL_VF_OFFLOAD_ENCAP) {
hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL |
NETIF_F_GSO_GRE |
NETIF_F_GSO_GRE_CSUM |
@@ -2445,7 +2454,7 @@ int i40evf_process_config(struct i40evf_adapter *adapter)
0;
if (!(vfres->vf_offload_flags &
- I40E_VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM))
+ VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM))
netdev->gso_partial_features |=
NETIF_F_GSO_UDP_TUNNEL_CSUM;
@@ -2472,7 +2481,7 @@ int i40evf_process_config(struct i40evf_adapter *adapter)
adapter->vsi.work_limit = I40E_DEFAULT_IRQ_WORK;
vsi->netdev = adapter->netdev;
vsi->qs_handle = adapter->vsi_res->qset_handle;
- if (vfres->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF) {
+ if (vfres->vf_offload_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
adapter->rss_key_size = vfres->rss_key_size;
adapter->rss_lut_size = vfres->rss_lut_size;
} else {
@@ -2558,8 +2567,8 @@ static void i40evf_init_task(struct work_struct *work)
dev_err(&pdev->dev, "Unsupported PF API version %d.%d, expected %d.%d\n",
adapter->pf_version.major,
adapter->pf_version.minor,
- I40E_VIRTCHNL_VERSION_MAJOR,
- I40E_VIRTCHNL_VERSION_MINOR);
+ VIRTCHNL_VERSION_MAJOR,
+ VIRTCHNL_VERSION_MINOR);
goto err;
}
err = i40evf_send_vf_config_msg(adapter);
@@ -2573,9 +2582,9 @@ static void i40evf_init_task(struct work_struct *work)
case __I40EVF_INIT_GET_RESOURCES:
/* aq msg sent, awaiting reply */
if (!adapter->vf_res) {
- bufsz = sizeof(struct i40e_virtchnl_vf_resource) +
+ bufsz = sizeof(struct virtchnl_vf_resource) +
(I40E_MAX_VF_VSI *
- sizeof(struct i40e_virtchnl_vsi_resource));
+ sizeof(struct virtchnl_vsi_resource));
adapter->vf_res = kzalloc(bufsz, GFP_KERNEL);
if (!adapter->vf_res)
goto err;
@@ -2606,7 +2615,7 @@ static void i40evf_init_task(struct work_struct *work)
if (i40evf_process_config(adapter))
goto err_alloc;
- adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
+ adapter->current_op = VIRTCHNL_OP_UNKNOWN;
adapter->flags |= I40EVF_FLAG_RX_CSUM_ENABLED;
@@ -2634,9 +2643,6 @@ static void i40evf_init_task(struct work_struct *work)
adapter->watchdog_timer.data = (unsigned long)adapter;
mod_timer(&adapter->watchdog_timer, jiffies + 1);
- adapter->num_active_queues = min_t(int,
- adapter->vsi_res->num_queue_pairs,
- (int)(num_online_cpus()));
adapter->tx_desc_count = I40EVF_DEFAULT_TXD;
adapter->rx_desc_count = I40EVF_DEFAULT_RXD;
err = i40evf_init_interrupt_scheme(adapter);
@@ -2644,7 +2650,7 @@ static void i40evf_init_task(struct work_struct *work)
goto err_sw_init;
i40evf_map_rings_to_vectors(adapter);
if (adapter->vf_res->vf_offload_flags &
- I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
+ VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
adapter->flags |= I40EVF_FLAG_WB_ON_ITR_CAPABLE;
err = i40evf_request_misc_irq(adapter);
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
index deb2cb8dac6b..d2bb250a71af 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
@@ -42,7 +42,7 @@
* Send message to PF and print status if failure.
**/
static int i40evf_send_pf_msg(struct i40evf_adapter *adapter,
- enum i40e_virtchnl_ops op, u8 *msg, u16 len)
+ enum virtchnl_ops op, u8 *msg, u16 len)
{
struct i40e_hw *hw = &adapter->hw;
i40e_status err;
@@ -68,12 +68,12 @@ static int i40evf_send_pf_msg(struct i40evf_adapter *adapter,
**/
int i40evf_send_api_ver(struct i40evf_adapter *adapter)
{
- struct i40e_virtchnl_version_info vvi;
+ struct virtchnl_version_info vvi;
- vvi.major = I40E_VIRTCHNL_VERSION_MAJOR;
- vvi.minor = I40E_VIRTCHNL_VERSION_MINOR;
+ vvi.major = VIRTCHNL_VERSION_MAJOR;
+ vvi.minor = VIRTCHNL_VERSION_MINOR;
- return i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_VERSION, (u8 *)&vvi,
+ return i40evf_send_pf_msg(adapter, VIRTCHNL_OP_VERSION, (u8 *)&vvi,
sizeof(vvi));
}
@@ -88,10 +88,10 @@ int i40evf_send_api_ver(struct i40evf_adapter *adapter)
**/
int i40evf_verify_api_ver(struct i40evf_adapter *adapter)
{
- struct i40e_virtchnl_version_info *pf_vvi;
+ struct virtchnl_version_info *pf_vvi;
struct i40e_hw *hw = &adapter->hw;
struct i40e_arq_event_info event;
- enum i40e_virtchnl_ops op;
+ enum virtchnl_ops op;
i40e_status err;
event.buf_len = I40EVF_MAX_AQ_BUF_SIZE;
@@ -109,8 +109,8 @@ int i40evf_verify_api_ver(struct i40evf_adapter *adapter)
if (err)
goto out_alloc;
op =
- (enum i40e_virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
- if (op == I40E_VIRTCHNL_OP_VERSION)
+ (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
+ if (op == VIRTCHNL_OP_VERSION)
break;
}
@@ -119,19 +119,19 @@ int i40evf_verify_api_ver(struct i40evf_adapter *adapter)
if (err)
goto out_alloc;
- if (op != I40E_VIRTCHNL_OP_VERSION) {
+ if (op != VIRTCHNL_OP_VERSION) {
dev_info(&adapter->pdev->dev, "Invalid reply type %d from PF\n",
op);
err = -EIO;
goto out_alloc;
}
- pf_vvi = (struct i40e_virtchnl_version_info *)event.msg_buf;
+ pf_vvi = (struct virtchnl_version_info *)event.msg_buf;
adapter->pf_version = *pf_vvi;
- if ((pf_vvi->major > I40E_VIRTCHNL_VERSION_MAJOR) ||
- ((pf_vvi->major == I40E_VIRTCHNL_VERSION_MAJOR) &&
- (pf_vvi->minor > I40E_VIRTCHNL_VERSION_MINOR)))
+ if ((pf_vvi->major > VIRTCHNL_VERSION_MAJOR) ||
+ ((pf_vvi->major == VIRTCHNL_VERSION_MAJOR) &&
+ (pf_vvi->minor > VIRTCHNL_VERSION_MINOR)))
err = -EIO;
out_alloc:
@@ -152,26 +152,25 @@ int i40evf_send_vf_config_msg(struct i40evf_adapter *adapter)
{
u32 caps;
- adapter->current_op = I40E_VIRTCHNL_OP_GET_VF_RESOURCES;
- adapter->aq_required &= ~I40EVF_FLAG_AQ_GET_CONFIG;
- caps = I40E_VIRTCHNL_VF_OFFLOAD_L2 |
- I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ |
- I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG |
- I40E_VIRTCHNL_VF_OFFLOAD_VLAN |
- I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR |
- I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 |
- I40E_VIRTCHNL_VF_OFFLOAD_ENCAP |
- I40E_VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;
-
- adapter->current_op = I40E_VIRTCHNL_OP_GET_VF_RESOURCES;
+ caps = VIRTCHNL_VF_OFFLOAD_L2 |
+ VIRTCHNL_VF_OFFLOAD_RSS_PF |
+ VIRTCHNL_VF_OFFLOAD_RSS_AQ |
+ VIRTCHNL_VF_OFFLOAD_RSS_REG |
+ VIRTCHNL_VF_OFFLOAD_VLAN |
+ VIRTCHNL_VF_OFFLOAD_WB_ON_ITR |
+ VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 |
+ VIRTCHNL_VF_OFFLOAD_ENCAP |
+ VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;
+
+ adapter->current_op = VIRTCHNL_OP_GET_VF_RESOURCES;
adapter->aq_required &= ~I40EVF_FLAG_AQ_GET_CONFIG;
if (PF_IS_V11(adapter))
return i40evf_send_pf_msg(adapter,
- I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
+ VIRTCHNL_OP_GET_VF_RESOURCES,
(u8 *)&caps, sizeof(caps));
else
return i40evf_send_pf_msg(adapter,
- I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
+ VIRTCHNL_OP_GET_VF_RESOURCES,
NULL, 0);
}
@@ -189,12 +188,12 @@ int i40evf_get_vf_config(struct i40evf_adapter *adapter)
{
struct i40e_hw *hw = &adapter->hw;
struct i40e_arq_event_info event;
- enum i40e_virtchnl_ops op;
+ enum virtchnl_ops op;
i40e_status err;
u16 len;
- len = sizeof(struct i40e_virtchnl_vf_resource) +
- I40E_MAX_VF_VSI * sizeof(struct i40e_virtchnl_vsi_resource);
+ len = sizeof(struct virtchnl_vf_resource) +
+ I40E_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource);
event.buf_len = len;
event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
if (!event.msg_buf) {
@@ -210,8 +209,8 @@ int i40evf_get_vf_config(struct i40evf_adapter *adapter)
if (err)
goto out_alloc;
op =
- (enum i40e_virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
- if (op == I40E_VIRTCHNL_OP_GET_VF_RESOURCES)
+ (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
+ if (op == VIRTCHNL_OP_GET_VF_RESOURCES)
break;
}
@@ -233,20 +232,20 @@ out:
**/
void i40evf_configure_queues(struct i40evf_adapter *adapter)
{
- struct i40e_virtchnl_vsi_queue_config_info *vqci;
- struct i40e_virtchnl_queue_pair_info *vqpi;
+ struct virtchnl_vsi_queue_config_info *vqci;
+ struct virtchnl_queue_pair_info *vqpi;
int pairs = adapter->num_active_queues;
int i, len, max_frame = I40E_MAX_RXBUFFER;
- if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
dev_err(&adapter->pdev->dev, "Cannot configure queues, command %d pending\n",
adapter->current_op);
return;
}
- adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES;
- len = sizeof(struct i40e_virtchnl_vsi_queue_config_info) +
- (sizeof(struct i40e_virtchnl_queue_pair_info) * pairs);
+ adapter->current_op = VIRTCHNL_OP_CONFIG_VSI_QUEUES;
+ len = sizeof(struct virtchnl_vsi_queue_config_info) +
+ (sizeof(struct virtchnl_queue_pair_info) * pairs);
vqci = kzalloc(len, GFP_KERNEL);
if (!vqci)
return;
@@ -279,7 +278,7 @@ void i40evf_configure_queues(struct i40evf_adapter *adapter)
}
adapter->aq_required &= ~I40EVF_FLAG_AQ_CONFIGURE_QUEUES;
- i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
+ i40evf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
(u8 *)vqci, len);
kfree(vqci);
}
@@ -292,20 +291,20 @@ void i40evf_configure_queues(struct i40evf_adapter *adapter)
**/
void i40evf_enable_queues(struct i40evf_adapter *adapter)
{
- struct i40e_virtchnl_queue_select vqs;
+ struct virtchnl_queue_select vqs;
- if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
dev_err(&adapter->pdev->dev, "Cannot enable queues, command %d pending\n",
adapter->current_op);
return;
}
- adapter->current_op = I40E_VIRTCHNL_OP_ENABLE_QUEUES;
+ adapter->current_op = VIRTCHNL_OP_ENABLE_QUEUES;
vqs.vsi_id = adapter->vsi_res->vsi_id;
vqs.tx_queues = BIT(adapter->num_active_queues) - 1;
vqs.rx_queues = vqs.tx_queues;
adapter->aq_required &= ~I40EVF_FLAG_AQ_ENABLE_QUEUES;
- i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
+ i40evf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_QUEUES,
(u8 *)&vqs, sizeof(vqs));
}
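
vqs.tx_queues is built as BIT(n) - 1, a mask selecting the n lowest queue IDs; a tiny example of the arithmetic:

#include <stdio.h>

#define BIT(n) (1UL << (n))

int main(void)
{
	unsigned int num_active_queues = 4;	/* hypothetical */
	unsigned long qmask = BIT(num_active_queues) - 1;

	printf("queue mask: 0x%lx\n", qmask);	/* 0xf: queues 0..3 */
	return 0;
}
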
@@ -317,20 +316,20 @@ void i40evf_enable_queues(struct i40evf_adapter *adapter)
**/
void i40evf_disable_queues(struct i40evf_adapter *adapter)
{
- struct i40e_virtchnl_queue_select vqs;
+ struct virtchnl_queue_select vqs;
- if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
dev_err(&adapter->pdev->dev, "Cannot disable queues, command %d pending\n",
adapter->current_op);
return;
}
- adapter->current_op = I40E_VIRTCHNL_OP_DISABLE_QUEUES;
+ adapter->current_op = VIRTCHNL_OP_DISABLE_QUEUES;
vqs.vsi_id = adapter->vsi_res->vsi_id;
vqs.tx_queues = BIT(adapter->num_active_queues) - 1;
vqs.rx_queues = vqs.tx_queues;
adapter->aq_required &= ~I40EVF_FLAG_AQ_DISABLE_QUEUES;
- i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
+ i40evf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_QUEUES,
(u8 *)&vqs, sizeof(vqs));
}
@@ -343,23 +342,23 @@ void i40evf_disable_queues(struct i40evf_adapter *adapter)
**/
void i40evf_map_queues(struct i40evf_adapter *adapter)
{
- struct i40e_virtchnl_irq_map_info *vimi;
+ struct virtchnl_irq_map_info *vimi;
int v_idx, q_vectors, len;
struct i40e_q_vector *q_vector;
- if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
dev_err(&adapter->pdev->dev, "Cannot map queues to vectors, command %d pending\n",
adapter->current_op);
return;
}
- adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP;
+ adapter->current_op = VIRTCHNL_OP_CONFIG_IRQ_MAP;
q_vectors = adapter->num_msix_vectors - NONQ_VECS;
- len = sizeof(struct i40e_virtchnl_irq_map_info) +
+ len = sizeof(struct virtchnl_irq_map_info) +
(adapter->num_msix_vectors *
- sizeof(struct i40e_virtchnl_vector_map));
+ sizeof(struct virtchnl_vector_map));
vimi = kzalloc(len, GFP_KERNEL);
if (!vimi)
return;
@@ -380,7 +379,7 @@ void i40evf_map_queues(struct i40evf_adapter *adapter)
vimi->vecmap[v_idx].rxq_map = 0;
adapter->aq_required &= ~I40EVF_FLAG_AQ_MAP_VECTORS;
- i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
+ i40evf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_IRQ_MAP,
(u8 *)vimi, len);
kfree(vimi);
}
@@ -395,12 +394,12 @@ void i40evf_map_queues(struct i40evf_adapter *adapter)
**/
void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
{
- struct i40e_virtchnl_ether_addr_list *veal;
+ struct virtchnl_ether_addr_list *veal;
int len, i = 0, count = 0;
struct i40evf_mac_filter *f;
bool more = false;
- if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
dev_err(&adapter->pdev->dev, "Cannot add filters, command %d pending\n",
adapter->current_op);
@@ -414,17 +413,17 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER;
return;
}
- adapter->current_op = I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS;
+ adapter->current_op = VIRTCHNL_OP_ADD_ETH_ADDR;
- len = sizeof(struct i40e_virtchnl_ether_addr_list) +
- (count * sizeof(struct i40e_virtchnl_ether_addr));
+ len = sizeof(struct virtchnl_ether_addr_list) +
+ (count * sizeof(struct virtchnl_ether_addr));
if (len > I40EVF_MAX_AQ_BUF_SIZE) {
dev_warn(&adapter->pdev->dev, "Too many add MAC changes in one request\n");
count = (I40EVF_MAX_AQ_BUF_SIZE -
- sizeof(struct i40e_virtchnl_ether_addr_list)) /
- sizeof(struct i40e_virtchnl_ether_addr);
- len = sizeof(struct i40e_virtchnl_ether_addr_list) +
- (count * sizeof(struct i40e_virtchnl_ether_addr));
+ sizeof(struct virtchnl_ether_addr_list)) /
+ sizeof(struct virtchnl_ether_addr);
+ len = sizeof(struct virtchnl_ether_addr_list) +
+ (count * sizeof(struct virtchnl_ether_addr));
more = true;
}
@@ -445,7 +444,7 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
}
if (!more)
adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER;
- i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
+ i40evf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_ETH_ADDR,
(u8 *)veal, len);
kfree(veal);
}
@@ -460,12 +459,12 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
**/
void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
{
- struct i40e_virtchnl_ether_addr_list *veal;
+ struct virtchnl_ether_addr_list *veal;
struct i40evf_mac_filter *f, *ftmp;
int len, i = 0, count = 0;
bool more = false;
- if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
dev_err(&adapter->pdev->dev, "Cannot remove filters, command %d pending\n",
adapter->current_op);
@@ -479,17 +478,17 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER;
return;
}
- adapter->current_op = I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS;
+ adapter->current_op = VIRTCHNL_OP_DEL_ETH_ADDR;
- len = sizeof(struct i40e_virtchnl_ether_addr_list) +
- (count * sizeof(struct i40e_virtchnl_ether_addr));
+ len = sizeof(struct virtchnl_ether_addr_list) +
+ (count * sizeof(struct virtchnl_ether_addr));
if (len > I40EVF_MAX_AQ_BUF_SIZE) {
dev_warn(&adapter->pdev->dev, "Too many delete MAC changes in one request\n");
count = (I40EVF_MAX_AQ_BUF_SIZE -
- sizeof(struct i40e_virtchnl_ether_addr_list)) /
- sizeof(struct i40e_virtchnl_ether_addr);
- len = sizeof(struct i40e_virtchnl_ether_addr_list) +
- (count * sizeof(struct i40e_virtchnl_ether_addr));
+ sizeof(struct virtchnl_ether_addr_list)) /
+ sizeof(struct virtchnl_ether_addr);
+ len = sizeof(struct virtchnl_ether_addr_list) +
+ (count * sizeof(struct virtchnl_ether_addr));
more = true;
}
veal = kzalloc(len, GFP_KERNEL);
@@ -510,7 +509,7 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
}
if (!more)
adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER;
- i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
+ i40evf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_ETH_ADDR,
(u8 *)veal, len);
kfree(veal);
}
@@ -525,12 +524,12 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
**/
void i40evf_add_vlans(struct i40evf_adapter *adapter)
{
- struct i40e_virtchnl_vlan_filter_list *vvfl;
+ struct virtchnl_vlan_filter_list *vvfl;
int len, i = 0, count = 0;
struct i40evf_vlan_filter *f;
bool more = false;
- if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
dev_err(&adapter->pdev->dev, "Cannot add VLANs, command %d pending\n",
adapter->current_op);
@@ -545,16 +544,16 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter)
adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
return;
}
- adapter->current_op = I40E_VIRTCHNL_OP_ADD_VLAN;
+ adapter->current_op = VIRTCHNL_OP_ADD_VLAN;
- len = sizeof(struct i40e_virtchnl_vlan_filter_list) +
+ len = sizeof(struct virtchnl_vlan_filter_list) +
(count * sizeof(u16));
if (len > I40EVF_MAX_AQ_BUF_SIZE) {
dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n");
count = (I40EVF_MAX_AQ_BUF_SIZE -
- sizeof(struct i40e_virtchnl_vlan_filter_list)) /
+ sizeof(struct virtchnl_vlan_filter_list)) /
sizeof(u16);
- len = sizeof(struct i40e_virtchnl_vlan_filter_list) +
+ len = sizeof(struct virtchnl_vlan_filter_list) +
(count * sizeof(u16));
more = true;
}
@@ -575,7 +574,7 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter)
}
if (!more)
adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
- i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len);
+ i40evf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len);
kfree(vvfl);
}
@@ -589,12 +588,12 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter)
**/
void i40evf_del_vlans(struct i40evf_adapter *adapter)
{
- struct i40e_virtchnl_vlan_filter_list *vvfl;
+ struct virtchnl_vlan_filter_list *vvfl;
struct i40evf_vlan_filter *f, *ftmp;
int len, i = 0, count = 0;
bool more = false;
- if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
dev_err(&adapter->pdev->dev, "Cannot remove VLANs, command %d pending\n",
adapter->current_op);
@@ -609,16 +608,16 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter)
adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
return;
}
- adapter->current_op = I40E_VIRTCHNL_OP_DEL_VLAN;
+ adapter->current_op = VIRTCHNL_OP_DEL_VLAN;
- len = sizeof(struct i40e_virtchnl_vlan_filter_list) +
+ len = sizeof(struct virtchnl_vlan_filter_list) +
(count * sizeof(u16));
if (len > I40EVF_MAX_AQ_BUF_SIZE) {
dev_warn(&adapter->pdev->dev, "Too many delete VLAN changes in one request\n");
count = (I40EVF_MAX_AQ_BUF_SIZE -
- sizeof(struct i40e_virtchnl_vlan_filter_list)) /
+ sizeof(struct virtchnl_vlan_filter_list)) /
sizeof(u16);
- len = sizeof(struct i40e_virtchnl_vlan_filter_list) +
+ len = sizeof(struct virtchnl_vlan_filter_list) +
(count * sizeof(u16));
more = true;
}
@@ -640,7 +639,7 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter)
}
if (!more)
adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
- i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DEL_VLAN, (u8 *)vvfl, len);
+ i40evf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_VLAN, (u8 *)vvfl, len);
kfree(vvfl);
}
@@ -653,25 +652,25 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter)
**/
void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags)
{
- struct i40e_virtchnl_promisc_info vpi;
+ struct virtchnl_promisc_info vpi;
int promisc_all;
- if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
dev_err(&adapter->pdev->dev, "Cannot set promiscuous mode, command %d pending\n",
adapter->current_op);
return;
}
- promisc_all = I40E_FLAG_VF_UNICAST_PROMISC |
- I40E_FLAG_VF_MULTICAST_PROMISC;
+ promisc_all = FLAG_VF_UNICAST_PROMISC |
+ FLAG_VF_MULTICAST_PROMISC;
if ((flags & promisc_all) == promisc_all) {
adapter->flags |= I40EVF_FLAG_PROMISC_ON;
adapter->aq_required &= ~I40EVF_FLAG_AQ_REQUEST_PROMISC;
dev_info(&adapter->pdev->dev, "Entering promiscuous mode\n");
}
- if (flags & I40E_FLAG_VF_MULTICAST_PROMISC) {
+ if (flags & FLAG_VF_MULTICAST_PROMISC) {
adapter->flags |= I40EVF_FLAG_ALLMULTI_ON;
adapter->aq_required &= ~I40EVF_FLAG_AQ_REQUEST_ALLMULTI;
dev_info(&adapter->pdev->dev, "Entering multicast promiscuous mode\n");
@@ -683,10 +682,10 @@ void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags)
dev_info(&adapter->pdev->dev, "Leaving promiscuous mode\n");
}
- adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE;
+ adapter->current_op = VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE;
vpi.vsi_id = adapter->vsi_res->vsi_id;
vpi.flags = flags;
- i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
+ i40evf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
(u8 *)&vpi, sizeof(vpi));
}
@@ -698,19 +697,19 @@ void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags)
**/
void i40evf_request_stats(struct i40evf_adapter *adapter)
{
- struct i40e_virtchnl_queue_select vqs;
+ struct virtchnl_queue_select vqs;
- if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* no error message, this isn't crucial */
return;
}
- adapter->current_op = I40E_VIRTCHNL_OP_GET_STATS;
+ adapter->current_op = VIRTCHNL_OP_GET_STATS;
vqs.vsi_id = adapter->vsi_res->vsi_id;
/* queue maps are ignored for this message - only the vsi is used */
- if (i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_GET_STATS,
+ if (i40evf_send_pf_msg(adapter, VIRTCHNL_OP_GET_STATS,
(u8 *)&vqs, sizeof(vqs)))
/* if the request failed, don't lock out others */
- adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
+ adapter->current_op = VIRTCHNL_OP_UNKNOWN;
}
/**
@@ -721,15 +720,15 @@ void i40evf_request_stats(struct i40evf_adapter *adapter)
**/
void i40evf_get_hena(struct i40evf_adapter *adapter)
{
- if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
dev_err(&adapter->pdev->dev, "Cannot get RSS hash capabilities, command %d pending\n",
adapter->current_op);
return;
}
- adapter->current_op = I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS;
+ adapter->current_op = VIRTCHNL_OP_GET_RSS_HENA_CAPS;
adapter->aq_required &= ~I40EVF_FLAG_AQ_GET_HENA;
- i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS,
+ i40evf_send_pf_msg(adapter, VIRTCHNL_OP_GET_RSS_HENA_CAPS,
NULL, 0);
}
@@ -741,18 +740,18 @@ void i40evf_get_hena(struct i40evf_adapter *adapter)
**/
void i40evf_set_hena(struct i40evf_adapter *adapter)
{
- struct i40e_virtchnl_rss_hena vrh;
+ struct virtchnl_rss_hena vrh;
- if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
dev_err(&adapter->pdev->dev, "Cannot set RSS hash enable, command %d pending\n",
adapter->current_op);
return;
}
vrh.hena = adapter->hena;
- adapter->current_op = I40E_VIRTCHNL_OP_SET_RSS_HENA;
+ adapter->current_op = VIRTCHNL_OP_SET_RSS_HENA;
adapter->aq_required &= ~I40EVF_FLAG_AQ_SET_HENA;
- i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_SET_RSS_HENA,
+ i40evf_send_pf_msg(adapter, VIRTCHNL_OP_SET_RSS_HENA,
(u8 *)&vrh, sizeof(vrh));
}
@@ -764,16 +763,16 @@ void i40evf_set_hena(struct i40evf_adapter *adapter)
**/
void i40evf_set_rss_key(struct i40evf_adapter *adapter)
{
- struct i40e_virtchnl_rss_key *vrk;
+ struct virtchnl_rss_key *vrk;
int len;
- if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
dev_err(&adapter->pdev->dev, "Cannot set RSS key, command %d pending\n",
adapter->current_op);
return;
}
- len = sizeof(struct i40e_virtchnl_rss_key) +
+ len = sizeof(struct virtchnl_rss_key) +
(adapter->rss_key_size * sizeof(u8)) - 1;
vrk = kzalloc(len, GFP_KERNEL);
if (!vrk)
@@ -782,9 +781,9 @@ void i40evf_set_rss_key(struct i40evf_adapter *adapter)
vrk->key_len = adapter->rss_key_size;
memcpy(vrk->key, adapter->rss_key, adapter->rss_key_size);
- adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_RSS_KEY;
+ adapter->current_op = VIRTCHNL_OP_CONFIG_RSS_KEY;
adapter->aq_required &= ~I40EVF_FLAG_AQ_SET_RSS_KEY;
- i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_CONFIG_RSS_KEY,
+ i40evf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_RSS_KEY,
(u8 *)vrk, len);
kfree(vrk);
}
@@ -797,16 +796,16 @@ void i40evf_set_rss_key(struct i40evf_adapter *adapter)
**/
void i40evf_set_rss_lut(struct i40evf_adapter *adapter)
{
- struct i40e_virtchnl_rss_lut *vrl;
+ struct virtchnl_rss_lut *vrl;
int len;
- if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
dev_err(&adapter->pdev->dev, "Cannot set RSS LUT, command %d pending\n",
adapter->current_op);
return;
}
- len = sizeof(struct i40e_virtchnl_rss_lut) +
+ len = sizeof(struct virtchnl_rss_lut) +
(adapter->rss_lut_size * sizeof(u8)) - 1;
vrl = kzalloc(len, GFP_KERNEL);
if (!vrl)
@@ -814,9 +813,9 @@ void i40evf_set_rss_lut(struct i40evf_adapter *adapter)
vrl->vsi_id = adapter->vsi.id;
vrl->lut_entries = adapter->rss_lut_size;
memcpy(vrl->lut, adapter->rss_lut, adapter->rss_lut_size);
- adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_RSS_LUT;
+ adapter->current_op = VIRTCHNL_OP_CONFIG_RSS_LUT;
adapter->aq_required &= ~I40EVF_FLAG_AQ_SET_RSS_LUT;
- i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_CONFIG_RSS_LUT,
+ i40evf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_RSS_LUT,
(u8 *)vrl, len);
kfree(vrl);
}
@@ -872,8 +871,8 @@ static void i40evf_print_link_message(struct i40evf_adapter *adapter)
void i40evf_request_reset(struct i40evf_adapter *adapter)
{
/* Don't check CURRENT_OP - this is always higher priority */
- i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_RESET_VF, NULL, 0);
- adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
+ i40evf_send_pf_msg(adapter, VIRTCHNL_OP_RESET_VF, NULL, 0);
+ adapter->current_op = VIRTCHNL_OP_UNKNOWN;
}
/**
@@ -889,17 +888,17 @@ void i40evf_request_reset(struct i40evf_adapter *adapter)
* This function handles the reply messages.
**/
void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
- enum i40e_virtchnl_ops v_opcode,
+ enum virtchnl_ops v_opcode,
i40e_status v_retval,
u8 *msg, u16 msglen)
{
struct net_device *netdev = adapter->netdev;
- if (v_opcode == I40E_VIRTCHNL_OP_EVENT) {
- struct i40e_virtchnl_pf_event *vpe =
- (struct i40e_virtchnl_pf_event *)msg;
+ if (v_opcode == VIRTCHNL_OP_EVENT) {
+ struct virtchnl_pf_event *vpe =
+ (struct virtchnl_pf_event *)msg;
switch (vpe->event) {
- case I40E_VIRTCHNL_EVENT_LINK_CHANGE:
+ case VIRTCHNL_EVENT_LINK_CHANGE:
adapter->link_speed =
vpe->event_data.link_event.link_speed;
if (adapter->link_up !=
@@ -916,7 +915,7 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
i40evf_print_link_message(adapter);
}
break;
- case I40E_VIRTCHNL_EVENT_RESET_IMPENDING:
+ case VIRTCHNL_EVENT_RESET_IMPENDING:
dev_info(&adapter->pdev->dev, "PF reset warning received\n");
if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING)) {
adapter->flags |= I40EVF_FLAG_RESET_PENDING;
@@ -933,19 +932,19 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
}
if (v_retval) {
switch (v_opcode) {
- case I40E_VIRTCHNL_OP_ADD_VLAN:
+ case VIRTCHNL_OP_ADD_VLAN:
dev_err(&adapter->pdev->dev, "Failed to add VLAN filter, error %s\n",
i40evf_stat_str(&adapter->hw, v_retval));
break;
- case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
+ case VIRTCHNL_OP_ADD_ETH_ADDR:
dev_err(&adapter->pdev->dev, "Failed to add MAC filter, error %s\n",
i40evf_stat_str(&adapter->hw, v_retval));
break;
- case I40E_VIRTCHNL_OP_DEL_VLAN:
+ case VIRTCHNL_OP_DEL_VLAN:
dev_err(&adapter->pdev->dev, "Failed to delete VLAN filter, error %s\n",
i40evf_stat_str(&adapter->hw, v_retval));
break;
- case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
+ case VIRTCHNL_OP_DEL_ETH_ADDR:
dev_err(&adapter->pdev->dev, "Failed to delete MAC filter, error %s\n",
i40evf_stat_str(&adapter->hw, v_retval));
break;
@@ -957,7 +956,7 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
}
}
switch (v_opcode) {
- case I40E_VIRTCHNL_OP_GET_STATS: {
+ case VIRTCHNL_OP_GET_STATS: {
struct i40e_eth_stats *stats =
(struct i40e_eth_stats *)msg;
netdev->stats.rx_packets = stats->rx_unicast +
@@ -974,10 +973,10 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
adapter->current_stats = *stats;
}
break;
- case I40E_VIRTCHNL_OP_GET_VF_RESOURCES: {
- u16 len = sizeof(struct i40e_virtchnl_vf_resource) +
+ case VIRTCHNL_OP_GET_VF_RESOURCES: {
+ u16 len = sizeof(struct virtchnl_vf_resource) +
I40E_MAX_VF_VSI *
- sizeof(struct i40e_virtchnl_vsi_resource);
+ sizeof(struct virtchnl_vsi_resource);
memcpy(adapter->vf_res, msg, min(msglen, len));
i40e_vf_parse_hw_config(&adapter->hw, adapter->vf_res);
/* restore current mac address */
@@ -985,18 +984,18 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
i40evf_process_config(adapter);
}
break;
- case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
+ case VIRTCHNL_OP_ENABLE_QUEUES:
/* enable transmits */
i40evf_irq_enable(adapter, true);
break;
- case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
+ case VIRTCHNL_OP_DISABLE_QUEUES:
i40evf_free_all_tx_resources(adapter);
i40evf_free_all_rx_resources(adapter);
if (adapter->state == __I40EVF_DOWN_PENDING)
adapter->state = __I40EVF_DOWN;
break;
- case I40E_VIRTCHNL_OP_VERSION:
- case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
+ case VIRTCHNL_OP_VERSION:
+ case VIRTCHNL_OP_CONFIG_IRQ_MAP:
/* Don't display an error if we get these out of sequence.
* If the firmware needed to get kicked, we'll get these and
* it's no problem.
@@ -1004,7 +1003,7 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
if (v_opcode != adapter->current_op)
return;
break;
- case I40E_VIRTCHNL_OP_IWARP:
+ case VIRTCHNL_OP_IWARP:
/* Gobble zero-length replies from the PF. They indicate that
* a previous message was received OK, and the client doesn't
* care about that.
@@ -1014,13 +1013,12 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
msg, msglen);
break;
- case I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
+ case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
adapter->client_pending &=
- ~(BIT(I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP));
+ ~(BIT(VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP));
break;
- case I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS: {
- struct i40e_virtchnl_rss_hena *vrh =
- (struct i40e_virtchnl_rss_hena *)msg;
+ case VIRTCHNL_OP_GET_RSS_HENA_CAPS: {
+ struct virtchnl_rss_hena *vrh = (struct virtchnl_rss_hena *)msg;
if (msglen == sizeof(*vrh))
adapter->hena = vrh->hena;
else
@@ -1034,5 +1032,5 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
adapter->current_op, v_opcode);
break;
} /* switch v_opcode */
- adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
+ adapter->current_op = VIRTCHNL_OP_UNKNOWN;
}
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index ee443985581f..4a50870e0fa7 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -257,6 +257,7 @@ static s32 igb_init_phy_params_82575(struct e1000_hw *hw)
}
/* Set phy->phy_addr and phy->id. */
+ igb_write_phy_reg_82580(hw, I347AT4_PAGE_SELECT, 0);
ret_val = igb_get_phy_id_82575(hw);
if (ret_val)
return ret_val;
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index bf9bf9056d0c..06ffb2bc713e 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -563,6 +563,7 @@ struct igb_adapter {
struct cyclecounter cc;
struct timecounter tc;
u32 tx_hwtstamp_timeouts;
+ u32 tx_hwtstamp_skipped;
u32 rx_hwtstamp_cleared;
bool pps_sys_wrap_on;
@@ -666,7 +667,7 @@ void igb_setup_tctl(struct igb_adapter *);
void igb_setup_rctl(struct igb_adapter *);
netdev_tx_t igb_xmit_frame_ring(struct sk_buff *, struct igb_ring *);
void igb_alloc_rx_buffers(struct igb_ring *, u16);
-void igb_update_stats(struct igb_adapter *, struct rtnl_link_stats64 *);
+void igb_update_stats(struct igb_adapter *);
bool igb_has_link(struct igb_adapter *adapter);
void igb_set_ethtool_ops(struct net_device *);
void igb_power_up_link(struct igb_adapter *);
@@ -676,6 +677,7 @@ void igb_ptp_stop(struct igb_adapter *adapter);
void igb_ptp_reset(struct igb_adapter *adapter);
void igb_ptp_suspend(struct igb_adapter *adapter);
void igb_ptp_rx_hang(struct igb_adapter *adapter);
+void igb_ptp_tx_hang(struct igb_adapter *adapter);
void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb);
void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
struct sk_buff *skb);
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 0efb62db6efd..d06a8db514d4 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -90,6 +90,7 @@ static const struct igb_stats igb_gstrings_stats[] = {
IGB_STAT("os2bmc_tx_by_host", stats.o2bspc),
IGB_STAT("os2bmc_rx_by_host", stats.b2ogprc),
IGB_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts),
+ IGB_STAT("tx_hwtstamp_skipped", tx_hwtstamp_skipped),
IGB_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared),
};
@@ -2315,7 +2316,7 @@ static void igb_get_ethtool_stats(struct net_device *netdev,
char *p;
spin_lock(&adapter->stats64_lock);
- igb_update_stats(adapter, net_stats);
+ igb_update_stats(adapter);
for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) {
p = (char *)adapter + igb_gstrings_stats[i].stat_offset;
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 1cf74aa4ebd9..ec62410b035a 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -191,10 +191,7 @@ static int igb_disable_sriov(struct pci_dev *dev);
static int igb_pci_disable_sriov(struct pci_dev *dev);
#endif
-#ifdef CONFIG_PM
-#ifdef CONFIG_PM_SLEEP
static int igb_suspend(struct device *);
-#endif
static int igb_resume(struct device *);
static int igb_runtime_suspend(struct device *dev);
static int igb_runtime_resume(struct device *dev);
@@ -204,7 +201,6 @@ static const struct dev_pm_ops igb_pm_ops = {
SET_RUNTIME_PM_OPS(igb_runtime_suspend, igb_runtime_resume,
igb_runtime_idle)
};
-#endif
static void igb_shutdown(struct pci_dev *);
static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs);
#ifdef CONFIG_IGB_DCA
@@ -1822,7 +1818,7 @@ void igb_down(struct igb_adapter *adapter)
/* record the stats before reset*/
spin_lock(&adapter->stats64_lock);
- igb_update_stats(adapter, &adapter->stats64);
+ igb_update_stats(adapter);
spin_unlock(&adapter->stats64_lock);
adapter->link_speed = 0;
@@ -4690,7 +4686,7 @@ no_wait:
}
spin_lock(&adapter->stats64_lock);
- igb_update_stats(adapter, &adapter->stats64);
+ igb_update_stats(adapter);
spin_unlock(&adapter->stats64_lock);
for (i = 0; i < adapter->num_tx_queues; i++) {
@@ -4726,6 +4722,7 @@ no_wait:
igb_spoof_check(adapter);
igb_ptp_rx_hang(adapter);
+ igb_ptp_tx_hang(adapter);
/* Check LVMMC register on i350/i354 only */
if ((adapter->hw.mac.type == e1000_i350) ||
@@ -5201,9 +5198,9 @@ static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
return __igb_maybe_stop_tx(tx_ring, size);
}
-static void igb_tx_map(struct igb_ring *tx_ring,
- struct igb_tx_buffer *first,
- const u8 hdr_len)
+static int igb_tx_map(struct igb_ring *tx_ring,
+ struct igb_tx_buffer *first,
+ const u8 hdr_len)
{
struct sk_buff *skb = first->skb;
struct igb_tx_buffer *tx_buffer;
@@ -5314,7 +5311,7 @@ static void igb_tx_map(struct igb_ring *tx_ring,
*/
mmiowb();
}
- return;
+ return 0;
dma_error:
dev_err(tx_ring->dev, "TX DMA map failed\n");
@@ -5345,6 +5342,8 @@ dma_error:
tx_buffer->skb = NULL;
tx_ring->next_to_use = i;
+
+ return -1;
}
netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
@@ -5390,6 +5389,8 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
adapter->ptp_tx_start = jiffies;
if (adapter->hw.mac.type == e1000_82576)
schedule_work(&adapter->ptp_tx_work);
+ } else {
+ adapter->tx_hwtstamp_skipped++;
}
}
@@ -5410,13 +5411,24 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
else if (!tso)
igb_tx_csum(tx_ring, first);
- igb_tx_map(tx_ring, first, hdr_len);
+ if (igb_tx_map(tx_ring, first, hdr_len))
+ goto cleanup_tx_tstamp;
return NETDEV_TX_OK;
out_drop:
dev_kfree_skb_any(first->skb);
first->skb = NULL;
+cleanup_tx_tstamp:
+ if (unlikely(tx_flags & IGB_TX_FLAGS_TSTAMP)) {
+ struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
+
+ dev_kfree_skb_any(adapter->ptp_tx_skb);
+ adapter->ptp_tx_skb = NULL;
+ if (adapter->hw.mac.type == e1000_82576)
+ cancel_work_sync(&adapter->ptp_tx_work);
+ clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state);
+ }
return NETDEV_TX_OK;
}
@@ -5487,7 +5499,7 @@ static void igb_get_stats64(struct net_device *netdev,
struct igb_adapter *adapter = netdev_priv(netdev);
spin_lock(&adapter->stats64_lock);
- igb_update_stats(adapter, &adapter->stats64);
+ igb_update_stats(adapter);
memcpy(stats, &adapter->stats64, sizeof(*stats));
spin_unlock(&adapter->stats64_lock);
}
@@ -5536,9 +5548,9 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
* igb_update_stats - Update the board statistics counters
* @adapter: board private structure
**/
-void igb_update_stats(struct igb_adapter *adapter,
- struct rtnl_link_stats64 *net_stats)
+void igb_update_stats(struct igb_adapter *adapter)
{
+ struct rtnl_link_stats64 *net_stats = &adapter->stats64;
struct e1000_hw *hw = &adapter->hw;
struct pci_dev *pdev = adapter->pdev;
u32 reg, mpc;
@@ -6457,8 +6469,8 @@ static void igb_set_default_mac_filter(struct igb_adapter *adapter)
igb_rar_set_index(adapter, 0);
}
-int igb_add_mac_filter(struct igb_adapter *adapter, const u8 *addr,
- const u8 queue)
+static int igb_add_mac_filter(struct igb_adapter *adapter, const u8 *addr,
+ const u8 queue)
{
struct e1000_hw *hw = &adapter->hw;
int rar_entries = hw->mac.rar_entry_count -
@@ -6487,8 +6499,8 @@ int igb_add_mac_filter(struct igb_adapter *adapter, const u8 *addr,
return -ENOSPC;
}
-int igb_del_mac_filter(struct igb_adapter *adapter, const u8 *addr,
- const u8 queue)
+static int igb_del_mac_filter(struct igb_adapter *adapter, const u8 *addr,
+ const u8 queue)
{
struct e1000_hw *hw = &adapter->hw;
int rar_entries = hw->mac.rar_entry_count -
@@ -6540,8 +6552,8 @@ static int igb_uc_unsync(struct net_device *netdev, const unsigned char *addr)
return 0;
}
-int igb_set_vf_mac_filter(struct igb_adapter *adapter, const int vf,
- const u32 info, const u8 *addr)
+static int igb_set_vf_mac_filter(struct igb_adapter *adapter, const int vf,
+ const u32 info, const u8 *addr)
{
struct pci_dev *pdev = adapter->pdev;
struct vf_data_storage *vf_data = &adapter->vf_data[vf];
@@ -8015,9 +8027,7 @@ static void igb_deliver_wake_packet(struct net_device *netdev)
netif_rx(skb);
}
-#ifdef CONFIG_PM
-#ifdef CONFIG_PM_SLEEP
-static int igb_suspend(struct device *dev)
+static int __maybe_unused igb_suspend(struct device *dev)
{
int retval;
bool wake;
@@ -8036,9 +8046,8 @@ static int igb_suspend(struct device *dev)
return 0;
}
-#endif /* CONFIG_PM_SLEEP */
-static int igb_resume(struct device *dev)
+static int __maybe_unused igb_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct net_device *netdev = pci_get_drvdata(pdev);
@@ -8092,7 +8101,7 @@ static int igb_resume(struct device *dev)
return err;
}
-static int igb_runtime_idle(struct device *dev)
+static int __maybe_unused igb_runtime_idle(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct net_device *netdev = pci_get_drvdata(pdev);
@@ -8104,7 +8113,7 @@ static int igb_runtime_idle(struct device *dev)
return -EBUSY;
}
-static int igb_runtime_suspend(struct device *dev)
+static int __maybe_unused igb_runtime_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
int retval;
@@ -8124,11 +8133,10 @@ static int igb_runtime_suspend(struct device *dev)
return 0;
}
-static int igb_runtime_resume(struct device *dev)
+static int __maybe_unused igb_runtime_resume(struct device *dev)
{
return igb_resume(dev);
}
-#endif /* CONFIG_PM */
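
With the #ifdef CONFIG_PM/CONFIG_PM_SLEEP guards gone, __maybe_unused keeps the PM callbacks compiling in every configuration while silencing the unused-function warning when no caller is built in. A standalone sketch of the idea (the macro below just mimics the kernel's definition):

#include <stdio.h>

#define __maybe_unused __attribute__((unused))

/* Compiled whether or not anything references it; no warning either way. */
static int __maybe_unused demo_suspend(void *dev)
{
	(void)dev;
	return 0;	/* nothing to quiesce in this sketch */
}

int main(void)
{
	puts("demo_suspend may be left unreferenced without -Wunused noise");
	return 0;
}
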
static void igb_shutdown(struct pci_dev *pdev)
{
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index 7a3fd4d74592..841c2a083349 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -712,6 +712,35 @@ void igb_ptp_rx_hang(struct igb_adapter *adapter)
}
/**
+ * igb_ptp_tx_hang - detect error case where Tx timestamp never finishes
+ * @adapter: private network adapter structure
+ */
+void igb_ptp_tx_hang(struct igb_adapter *adapter)
+{
+ bool timeout = time_is_before_jiffies(adapter->ptp_tx_start +
+ IGB_PTP_TX_TIMEOUT);
+
+ if (!adapter->ptp_tx_skb)
+ return;
+
+ if (!test_bit(__IGB_PTP_TX_IN_PROGRESS, &adapter->state))
+ return;
+
+ /* If we haven't received a timestamp within the timeout, it is
+ * reasonable to assume that it will never occur, so we can unlock the
+ * timestamp bit when this occurs.
+ */
+ if (timeout) {
+ cancel_work_sync(&adapter->ptp_tx_work);
+ dev_kfree_skb_any(adapter->ptp_tx_skb);
+ adapter->ptp_tx_skb = NULL;
+ clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state);
+ adapter->tx_hwtstamp_timeouts++;
+ dev_warn(&adapter->pdev->dev, "clearing Tx timestamp hang\n");
+ }
+}
+
+/**
* igb_ptp_tx_hwtstamp - utility function which checks for TX time stamp
* @adapter: Board private structure.
*
@@ -721,6 +750,7 @@ void igb_ptp_rx_hang(struct igb_adapter *adapter)
**/
static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
{
+ struct sk_buff *skb = adapter->ptp_tx_skb;
struct e1000_hw *hw = &adapter->hw;
struct skb_shared_hwtstamps shhwtstamps;
u64 regval;
@@ -748,10 +778,17 @@ static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
shhwtstamps.hwtstamp =
ktime_add_ns(shhwtstamps.hwtstamp, adjust);
- skb_tstamp_tx(adapter->ptp_tx_skb, &shhwtstamps);
- dev_kfree_skb_any(adapter->ptp_tx_skb);
+ /* Clear the lock early before calling skb_tstamp_tx so that
+ * applications are not woken up before the lock bit is clear. We use
+ * a copy of the skb pointer to ensure other threads can't change it
+ * while we're notifying the stack.
+ */
adapter->ptp_tx_skb = NULL;
clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state);
+
+ /* Notify the stack and free the skb after we've unlocked */
+ skb_tstamp_tx(skb, &shhwtstamps);
+ dev_kfree_skb_any(skb);
}
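
The reordering above — detach the skb pointer and drop the in-progress bit before skb_tstamp_tx() — means an application woken by the timestamp delivery already sees the slot free for the next packet. A sketch of that ordering using C11 atomics in place of the kernel's bitops (names are illustrative):

#include <stdatomic.h>
#include <stdio.h>

struct tstamp_slot {
	void *skb;			/* pending packet, if any */
	atomic_flag in_progress;	/* stands in for __IGB_PTP_TX_IN_PROGRESS */
};

static void complete_tx_timestamp(struct tstamp_slot *slot,
				  void (*notify_and_free)(void *skb))
{
	void *skb = slot->skb;	/* local copy: others can't change it now */

	slot->skb = NULL;
	/* release the lock bit first... */
	atomic_flag_clear_explicit(&slot->in_progress, memory_order_release);
	/* ...so a waiter woken by the notification already sees it free */
	notify_and_free(skb);
}

static void fake_notify(void *skb)
{
	printf("delivered timestamp for %p\n", skb);
}

int main(void)
{
	int pkt;
	struct tstamp_slot slot = { .skb = &pkt,
				    .in_progress = ATOMIC_FLAG_INIT };

	(void)atomic_flag_test_and_set(&slot.in_progress);	/* take lock */
	complete_tx_timestamp(&slot, fake_notify);
	return 0;
}
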
/**
@@ -941,6 +978,7 @@ static int igb_ptp_set_timestamp_mode(struct igb_adapter *adapter,
is_l4 = true;
break;
case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_NTP_ALL:
case HWTSTAMP_FILTER_ALL:
/* 82576 cannot timestamp all packets, which it needs to do to
* support both V1 Sync and Delay_Req messages
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 76263762bea1..dd5578756ae0 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -733,6 +733,7 @@ struct ixgbe_adapter {
struct timecounter hw_tc;
u32 base_incval;
u32 tx_hwtstamp_timeouts;
+ u32 tx_hwtstamp_skipped;
u32 rx_hwtstamp_cleared;
void (*ptp_setup_sdp)(struct ixgbe_adapter *);
@@ -960,6 +961,7 @@ void ixgbe_ptp_suspend(struct ixgbe_adapter *adapter);
void ixgbe_ptp_stop(struct ixgbe_adapter *adapter);
void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter);
void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter);
+void ixgbe_ptp_tx_hang(struct ixgbe_adapter *adapter);
void ixgbe_ptp_rx_pktstamp(struct ixgbe_q_vector *, struct sk_buff *);
void ixgbe_ptp_rx_rgtstamp(struct ixgbe_q_vector *, struct sk_buff *skb);
static inline void ixgbe_ptp_rx_hwtstamp(struct ixgbe_ring *rx_ring,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index c8ac46049f34..d602637ccc40 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -1589,15 +1589,17 @@ s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
switch (ntohs(input_mask->formatted.vlan_id) & 0xEFFF) {
case 0x0000:
- /* mask VLAN ID, fall through to mask VLAN priority */
+ /* mask VLAN ID */
fdirm |= IXGBE_FDIRM_VLANID;
+ /* fall through */
case 0x0FFF:
/* mask VLAN priority */
fdirm |= IXGBE_FDIRM_VLANP;
break;
case 0xE000:
- /* mask VLAN ID only, fall through */
+ /* mask VLAN ID only */
fdirm |= IXGBE_FDIRM_VLANID;
+ /* fall through */
case 0xEFFF:
/* no VLAN fields masked */
break;
@@ -1608,8 +1610,9 @@ s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
switch (input_mask->formatted.flex_bytes & 0xFFFF) {
case 0x0000:
- /* Mask Flex Bytes, fall through */
+ /* Mask Flex Bytes */
fdirm |= IXGBE_FDIRM_FLEX;
+ /* fall through */
case 0xFFFF:
break;
default:
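All of the fall-through hunks in this file make the same mechanical change: the words "fall through" move out of the descriptive comment into a standalone comment placed immediately before the next case label. That placement is what GCC's -Wimplicit-fallthrough comment heuristics recognize, so deliberate fall-throughs stop warning while accidental ones still do. A hedged sketch of the convention (the flag names are illustrative):

	switch (vlan_mask) {
	case 0x0000:
		/* mask VLAN ID */
		fdirm |= MASK_VLANID;
		/* fall through */
	case 0x0FFF:
		/* mask VLAN priority */
		fdirm |= MASK_VLANP;
		break;
	default:
		break;
	}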
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index c38d50c1fcf7..4e35e7017f3d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -155,7 +155,7 @@ s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw)
if (ret_val)
return ret_val;
- /* only backplane uses autoc so fall though */
+ /* fall through - only backplane uses autoc */
case ixgbe_media_type_fiber:
reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
@@ -395,7 +395,8 @@ s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw)
}
/* Initialize the LED link active for LED blink support */
- hw->mac.ops.init_led_link_act(hw);
+ if (hw->mac.ops.init_led_link_act)
+ hw->mac.ops.init_led_link_act(hw);
return status;
}
@@ -3548,7 +3549,7 @@ void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw,
rxpktsize <<= IXGBE_RXPBSIZE_SHIFT;
for (; i < (num_pb / 2); i++)
IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
- /* Fall through to configure remaining packet buffers */
+ /* fall through - configure remaining packet buffers */
case (PBA_STRATEGY_EQUAL):
/* Divide the remaining Rx packet buffer evenly among the TCs */
rxpktsize = (pbsize / (num_pb - i)) << IXGBE_RXPBSIZE_SHIFT;
@@ -4120,15 +4121,6 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
speedcnt++;
highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL;
- /* If we already have link at this speed, just jump out */
- status = hw->mac.ops.check_link(hw, &link_speed, &link_up,
- false);
- if (status)
- return status;
-
- if (link_speed == IXGBE_LINK_SPEED_10GB_FULL && link_up)
- goto out;
-
/* Set the module link speed */
switch (hw->phy.media_type) {
case ixgbe_media_type_fiber:
@@ -4180,15 +4172,6 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN)
highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL;
- /* If we already have link at this speed, just jump out */
- status = hw->mac.ops.check_link(hw, &link_speed, &link_up,
- false);
- if (status)
- return status;
-
- if (link_speed == IXGBE_LINK_SPEED_1GB_FULL && link_up)
- goto out;
-
/* Set the module link speed */
switch (hw->phy.media_type) {
case ixgbe_media_type_fiber:
@@ -4295,4 +4278,23 @@ void ixgbe_set_soft_rate_select_speed(struct ixgbe_hw *hw,
hw_dbg(hw, "Failed to write Rx Rate Select RS0\n");
return;
}
+
+ /* Set RS1 */
+ status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
+ IXGBE_I2C_EEPROM_DEV_ADDR2,
+ &eeprom_data);
+ if (status) {
+ hw_dbg(hw, "Failed to read Rx Rate Select RS1\n");
+ return;
+ }
+
+ eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;
+
+ status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
+ IXGBE_I2C_EEPROM_DEV_ADDR2,
+ eeprom_data);
+ if (status) {
+ hw_dbg(hw, "Failed to write Rx Rate Select RS1\n");
+ return;
+ }
}
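The added RS1 block repeats the RS0 sequence exactly: read the SFF-8472 byte over I2C, clear the soft rate-select field, OR in the requested rate, and write the byte back, bailing out with a debug message on any failure. A hedged condensation (the phy.ops accessors match those used above; the wrapper itself is hypothetical):

	static s32 set_soft_rs(struct ixgbe_hw *hw, u8 offset, u8 dev_addr, u8 rs)
	{
		u8 data;
		s32 status;

		status = hw->phy.ops.read_i2c_byte(hw, offset, dev_addr, &data);
		if (status)
			return status;	/* read failed; leave the module untouched */

		data = (data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;

		return hw->phy.ops.write_i2c_byte(hw, offset, dev_addr, data);
	}

With such a helper the RS0 and RS1 updates collapse to two calls differing only in the byte offset passed in.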
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 7e5e336d7dcc..72c565712a5f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -111,6 +111,9 @@ static const struct ixgbe_stats ixgbe_gstrings_stats[] = {
{"os2bmc_tx_by_bmc", IXGBE_STAT(stats.b2ospc)},
{"os2bmc_tx_by_host", IXGBE_STAT(stats.o2bspc)},
{"os2bmc_rx_by_host", IXGBE_STAT(stats.b2ogprc)},
+ {"tx_hwtstamp_timeouts", IXGBE_STAT(tx_hwtstamp_timeouts)},
+ {"tx_hwtstamp_skipped", IXGBE_STAT(tx_hwtstamp_skipped)},
+ {"rx_hwtstamp_cleared", IXGBE_STAT(rx_hwtstamp_cleared)},
#ifdef IXGBE_FCOE
{"fcoe_bad_fccrc", IXGBE_STAT(stats.fccrc)},
{"rx_fcoe_dropped", IXGBE_STAT(stats.fcoerpdc)},
@@ -1274,7 +1277,7 @@ static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
u8 *data)
{
char *p = (char *)data;
- int i;
+ unsigned int i;
switch (stringset) {
case ETH_SS_TEST:
@@ -2254,6 +2257,9 @@ static int ixgbe_set_phys_id(struct net_device *netdev,
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
+ if (!hw->mac.ops.led_on || !hw->mac.ops.led_off)
+ return -EOPNOTSUPP;
+
switch (state) {
case ETHTOOL_ID_ACTIVE:
adapter->led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
@@ -2665,6 +2671,7 @@ static int ixgbe_flowspec_to_flow_type(struct ethtool_rx_flow_spec *fsp,
*flow_type = IXGBE_ATR_FLOW_TYPE_IPV4;
break;
}
+ /* fall through */
default:
return 0;
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
index 2a653ec954f5..a23c2b5411a0 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
@@ -491,7 +491,7 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
if ((fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA) &&
(fctl & FC_FC_END_SEQ)) {
skb_linearize(skb);
- crc = (struct fcoe_crc_eof *)skb_put(skb, sizeof(*crc));
+ crc = skb_put(skb, sizeof(*crc));
crc->fcoe_eof = FC_EOF_T;
}
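The dropped cast works because skb_put() now returns void * rather than unsigned char *, so assigning to a typed pointer converts implicitly. The resulting idiom, hedged:

	struct fcoe_crc_eof *crc;

	/* skb_put() returns void *; no cast is needed on assignment */
	crc = skb_put(skb, sizeof(*crc));
	crc->fcoe_eof = FC_EOF_T;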
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index d39cba214320..f1dbdf26d8e1 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -58,6 +58,7 @@
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
#include <net/vxlan.h>
+#include <net/mpls.h>
#include "ixgbe.h"
#include "ixgbe_common.h"
@@ -75,7 +76,7 @@ char ixgbe_default_device_descr[] =
static char ixgbe_default_device_descr[] =
"Intel(R) 10 Gigabit Network Connection";
#endif
-#define DRV_VERSION "5.0.0-k"
+#define DRV_VERSION "5.1.0-k"
const char ixgbe_driver_version[] = DRV_VERSION;
static const char ixgbe_copyright[] =
"Copyright (c) 1999-2016 Intel Corporation.";
@@ -1451,7 +1452,7 @@ static int __ixgbe_notify_dca(struct device *dev, void *data)
IXGBE_DCA_CTRL_DCA_MODE_CB2);
break;
}
- /* Fall Through since DCA is disabled. */
+ /* fall through - DCA is disabled. */
case DCA_PROVIDER_REMOVE:
if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
dca_remove_requester(dev);
@@ -2232,6 +2233,7 @@ static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter,
break;
default:
bpf_warn_invalid_xdp_action(act);
+ /* fallthrough */
case XDP_ABORTED:
trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
/* fallthrough -- handle aborts by dropping packet */
@@ -2638,8 +2640,7 @@ static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter)
if (test_bit(__IXGBE_DOWN, &adapter->state))
return;
- if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
- !(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_EVENT))
+ if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_EVENT))
return;
adapter->flags2 &= ~IXGBE_FLAG2_TEMP_SENSOR_EVENT;
@@ -3105,23 +3106,23 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
+ unsigned int ri = 0, ti = 0;
int vector, err;
- int ri = 0, ti = 0;
for (vector = 0; vector < adapter->num_q_vectors; vector++) {
struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
struct msix_entry *entry = &adapter->msix_entries[vector];
if (q_vector->tx.ring && q_vector->rx.ring) {
- snprintf(q_vector->name, sizeof(q_vector->name) - 1,
- "%s-%s-%d", netdev->name, "TxRx", ri++);
+ snprintf(q_vector->name, sizeof(q_vector->name),
+ "%s-TxRx-%u", netdev->name, ri++);
ti++;
} else if (q_vector->rx.ring) {
- snprintf(q_vector->name, sizeof(q_vector->name) - 1,
- "%s-%s-%d", netdev->name, "rx", ri++);
+ snprintf(q_vector->name, sizeof(q_vector->name),
+ "%s-rx-%u", netdev->name, ri++);
} else if (q_vector->tx.ring) {
- snprintf(q_vector->name, sizeof(q_vector->name) - 1,
- "%s-%s-%d", netdev->name, "tx", ti++);
+ snprintf(q_vector->name, sizeof(q_vector->name),
+ "%s-tx-%u", netdev->name, ti++);
} else {
/* skip this unused q_vector */
continue;
@@ -3802,6 +3803,9 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
mrqc = IXGBE_MRQC_VMDQRSS32EN;
else
mrqc = IXGBE_MRQC_VMDQRSS64EN;
+
+ /* Enable L3/L4 for Tx Switched packets */
+ mrqc |= IXGBE_MRQC_L3L4TXSWEN;
} else {
if (tcs > 4)
mrqc = IXGBE_MRQC_RTRSS8TCEN;
@@ -4174,7 +4178,7 @@ static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter)
case ixgbe_mac_x550em_a:
if (adapter->num_vfs)
rdrxctl |= IXGBE_RDRXCTL_PSP;
- /* fall through for older HW */
+ /* fall through */
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
/* Disable RSC for ACK packets */
@@ -6183,8 +6187,6 @@ int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring)
if (!tx_ring->tx_buffer_info)
goto err;
- u64_stats_init(&tx_ring->syncp);
-
/* round up to nearest 4K */
tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
tx_ring->size = ALIGN(tx_ring->size, 4096);
@@ -6278,8 +6280,6 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
if (!rx_ring->rx_buffer_info)
goto err;
- u64_stats_init(&rx_ring->syncp);
-
/* Round up to nearest 4K */
rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
rx_ring->size = ALIGN(rx_ring->size, 4096);
@@ -6886,6 +6886,7 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
hwstats->o2bspc += IXGBE_READ_REG(hw, IXGBE_O2BSPC);
hwstats->b2ospc += IXGBE_READ_REG(hw, IXGBE_B2OSPC);
hwstats->b2ogprc += IXGBE_READ_REG(hw, IXGBE_B2OGPRC);
+ /* fall through */
case ixgbe_mac_82599EB:
for (i = 0; i < 16; i++)
adapter->hw_rx_no_dma_resources +=
@@ -7634,6 +7635,7 @@ static void ixgbe_service_task(struct work_struct *work)
if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) {
ixgbe_ptp_overflow_check(adapter);
ixgbe_ptp_rx_hang(adapter);
+ ixgbe_ptp_tx_hang(adapter);
}
ixgbe_service_event_complete(adapter);
@@ -7667,7 +7669,10 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring,
if (err < 0)
return err;
- ip.hdr = skb_network_header(skb);
+ if (eth_p_mpls(first->protocol))
+ ip.hdr = skb_inner_network_header(skb);
+ else
+ ip.hdr = skb_network_header(skb);
l4.hdr = skb_checksum_start(skb);
/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
@@ -7871,9 +7876,9 @@ static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
#define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | \
IXGBE_TXD_CMD_RS)
-static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
- struct ixgbe_tx_buffer *first,
- const u8 hdr_len)
+static int ixgbe_tx_map(struct ixgbe_ring *tx_ring,
+ struct ixgbe_tx_buffer *first,
+ const u8 hdr_len)
{
struct sk_buff *skb = first->skb;
struct ixgbe_tx_buffer *tx_buffer;
@@ -8000,7 +8005,7 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
mmiowb();
}
- return;
+ return 0;
dma_error:
dev_err(tx_ring->dev, "TX DMA map failed\n");
tx_buffer = &tx_ring->tx_buffer_info[i];
@@ -8030,6 +8035,8 @@ dma_error:
first->skb = NULL;
tx_ring->next_to_use = i;
+
+ return -1;
}
static void ixgbe_atr(struct ixgbe_ring *ring,
@@ -8205,6 +8212,7 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
break;
+ /* fall through */
default:
return fallback(dev, skb);
}
@@ -8330,16 +8338,19 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
protocol = vlan_get_protocol(skb);
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
- adapter->ptp_clock &&
- !test_and_set_bit_lock(__IXGBE_PTP_TX_IN_PROGRESS,
- &adapter->state)) {
- skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
- tx_flags |= IXGBE_TX_FLAGS_TSTAMP;
-
- /* schedule check for Tx timestamp */
- adapter->ptp_tx_skb = skb_get(skb);
- adapter->ptp_tx_start = jiffies;
- schedule_work(&adapter->ptp_tx_work);
+ adapter->ptp_clock) {
+ if (!test_and_set_bit_lock(__IXGBE_PTP_TX_IN_PROGRESS,
+ &adapter->state)) {
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ tx_flags |= IXGBE_TX_FLAGS_TSTAMP;
+
+ /* schedule check for Tx timestamp */
+ adapter->ptp_tx_skb = skb_get(skb);
+ adapter->ptp_tx_start = jiffies;
+ schedule_work(&adapter->ptp_tx_work);
+ } else {
+ adapter->tx_hwtstamp_skipped++;
+ }
}
skb_tx_timestamp(skb);
@@ -8402,13 +8413,21 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
#ifdef IXGBE_FCOE
xmit_fcoe:
#endif /* IXGBE_FCOE */
- ixgbe_tx_map(tx_ring, first, hdr_len);
+ if (ixgbe_tx_map(tx_ring, first, hdr_len))
+ goto cleanup_tx_timestamp;
return NETDEV_TX_OK;
out_drop:
dev_kfree_skb_any(first->skb);
first->skb = NULL;
+cleanup_tx_timestamp:
+ if (unlikely(tx_flags & IXGBE_TX_FLAGS_TSTAMP)) {
+ dev_kfree_skb_any(adapter->ptp_tx_skb);
+ adapter->ptp_tx_skb = NULL;
+ cancel_work_sync(&adapter->ptp_tx_work);
+ clear_bit_unlock(__IXGBE_PTP_TX_IN_PROGRESS, &adapter->state);
+ }
return NETDEV_TX_OK;
}
@@ -9195,11 +9214,14 @@ free_jump:
return err;
}
-static int __ixgbe_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
- struct tc_to_netdev *tc)
+static int __ixgbe_setup_tc(struct net_device *dev, u32 handle, u32 chain_index,
+ __be16 proto, struct tc_to_netdev *tc)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
+ if (chain_index)
+ return -EOPNOTSUPP;
+
if (TC_H_MAJ(handle) == TC_H_MAJ(TC_H_INGRESS) &&
tc->type == TC_SETUP_CLSU32) {
switch (tc->cls_u32->command) {
@@ -9793,6 +9815,8 @@ static int ixgbe_xdp(struct net_device *dev, struct netdev_xdp *xdp)
return ixgbe_xdp_setup(dev, xdp->prog);
case XDP_QUERY_PROG:
xdp->prog_attached = !!(adapter->xdp_prog);
+ xdp->prog_id = adapter->xdp_prog ?
+ adapter->xdp_prog->aux->id : 0;
return 0;
default:
return -EINVAL;
@@ -9929,6 +9953,7 @@ bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
/* only support first port */
if (hw->bus.func != 0)
break;
+ /* fall through */
case IXGBE_SUBDEV_ID_82599_SP_560FLR:
case IXGBE_SUBDEV_ID_82599_SFP:
case IXGBE_SUBDEV_ID_82599_RNDC:
@@ -10191,7 +10216,11 @@ skip_sriov:
netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
netdev->hw_enc_features |= netdev->vlan_features;
- netdev->mpls_features |= NETIF_F_HW_CSUM;
+ netdev->mpls_features |= NETIF_F_SG |
+ NETIF_F_TSO |
+ NETIF_F_TSO6 |
+ NETIF_F_HW_CSUM;
+ netdev->mpls_features |= IXGBE_GSO_PARTIAL_FEATURES;
/* set this bit last since it cannot be part of vlan_features */
netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
@@ -10275,6 +10304,10 @@ skip_sriov:
if (err)
goto err_sw_init;
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ u64_stats_init(&adapter->rx_ring[i]->syncp);
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ u64_stats_init(&adapter->tx_ring[i]->syncp);
for (i = 0; i < adapter->num_xdp_queues; i++)
u64_stats_init(&adapter->xdp_ring[i]->syncp);
@@ -10341,11 +10374,11 @@ skip_sriov:
"hardware.\n");
}
strcpy(netdev->name, "eth%d");
+ pci_set_drvdata(pdev, adapter);
err = register_netdev(netdev);
if (err)
goto err_register;
- pci_set_drvdata(pdev, adapter);
/* power down the optics for 82599 SFP+ fiber */
if (hw->mac.ops.disable_tx_laser)
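The ixgbe_main.c transmit-path changes fit together as one state machine around the __IXGBE_PTP_TX_IN_PROGRESS bit: the xmit path arms a timestamp only if it wins test_and_set_bit_lock() (otherwise it bumps the new tx_hwtstamp_skipped counter), ixgbe_tx_map() now reports DMA mapping failure so the armed state can be torn down, and the service task calls ixgbe_ptp_tx_hang() to reclaim the bit if hardware never delivers a timestamp. A condensed, hedged sketch of the arm and cleanup halves (field and constant names follow the hunks above):

	/* arm: only one outstanding Tx timestamp is supported */
	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && adapter->ptp_clock) {
		if (!test_and_set_bit_lock(__IXGBE_PTP_TX_IN_PROGRESS,
					   &adapter->state)) {
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
			adapter->ptp_tx_skb = skb_get(skb);	/* hold a reference */
			adapter->ptp_tx_start = jiffies;
			schedule_work(&adapter->ptp_tx_work);
		} else {
			adapter->tx_hwtstamp_skipped++;		/* slot already taken */
		}
	}

	/* cleanup after a mapping failure: drop the reference, release the bit */
	if (unlikely(tx_flags & IXGBE_TX_FLAGS_TSTAMP)) {
		dev_kfree_skb_any(adapter->ptp_tx_skb);
		adapter->ptp_tx_skb = NULL;
		cancel_work_sync(&adapter->ptp_tx_work);
		clear_bit_unlock(__IXGBE_PTP_TX_IN_PROGRESS, &adapter->state);
	}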
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
index 5aa2c3cf7aec..b0cac961df3b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
@@ -84,8 +84,9 @@
#define IXGBE_CS4227_GLOBAL_ID_LSB 0
#define IXGBE_CS4227_GLOBAL_ID_MSB 1
#define IXGBE_CS4227_SCRATCH 2
-#define IXGBE_CS4223_PHY_ID 0x7003 /* Quad port */
-#define IXGBE_CS4227_PHY_ID 0x3003 /* Dual port */
+#define IXGBE_CS4227_EFUSE_PDF_SKU 0x19F
+#define IXGBE_CS4223_SKU_ID 0x0010 /* Quad port */
+#define IXGBE_CS4227_SKU_ID 0x0014 /* Dual port */
#define IXGBE_CS4227_RESET_PENDING 0x1357
#define IXGBE_CS4227_RESET_COMPLETE 0x5AA5
#define IXGBE_CS4227_RETRIES 15
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index ef0635e0918c..86d6924a2b71 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -663,6 +663,33 @@ static void ixgbe_ptp_clear_tx_timestamp(struct ixgbe_adapter *adapter)
}
/**
+ * ixgbe_ptp_tx_hang - detect error case where Tx timestamp never finishes
+ * @adapter: private network adapter structure
+ */
+void ixgbe_ptp_tx_hang(struct ixgbe_adapter *adapter)
+{
+ bool timeout = time_is_before_jiffies(adapter->ptp_tx_start +
+ IXGBE_PTP_TX_TIMEOUT);
+
+ if (!adapter->ptp_tx_skb)
+ return;
+
+ if (!test_bit(__IXGBE_PTP_TX_IN_PROGRESS, &adapter->state))
+ return;
+
+ /* If we haven't received a timestamp within the timeout, it is
+ * reasonable to assume that it will never occur, so we can unlock the
+ * timestamp bit.
+ */
+ if (timeout) {
+ cancel_work_sync(&adapter->ptp_tx_work);
+ ixgbe_ptp_clear_tx_timestamp(adapter);
+ adapter->tx_hwtstamp_timeouts++;
+ e_warn(drv, "clearing Tx timestamp hang\n");
+ }
+}
+
+/**
* ixgbe_ptp_tx_hwtstamp - utility function which checks for TX time stamp
* @adapter: the private adapter struct
*
@@ -672,17 +699,26 @@ static void ixgbe_ptp_clear_tx_timestamp(struct ixgbe_adapter *adapter)
*/
static void ixgbe_ptp_tx_hwtstamp(struct ixgbe_adapter *adapter)
{
+ struct sk_buff *skb = adapter->ptp_tx_skb;
struct ixgbe_hw *hw = &adapter->hw;
struct skb_shared_hwtstamps shhwtstamps;
u64 regval = 0;
regval |= (u64)IXGBE_READ_REG(hw, IXGBE_TXSTMPL);
regval |= (u64)IXGBE_READ_REG(hw, IXGBE_TXSTMPH) << 32;
-
ixgbe_ptp_convert_to_hwtstamp(adapter, &shhwtstamps, regval);
- skb_tstamp_tx(adapter->ptp_tx_skb, &shhwtstamps);
- ixgbe_ptp_clear_tx_timestamp(adapter);
+ /* Handle cleanup of the ptp_tx_skb ourselves, and unlock the state
+ * bit prior to notifying the stack via skb_tstamp_tx(). This prevents
+ * well-behaved applications from attempting to timestamp again prior
+ * to the lock bit being clear.
+ */
+ adapter->ptp_tx_skb = NULL;
+ clear_bit_unlock(__IXGBE_PTP_TX_IN_PROGRESS, &adapter->state);
+
+ /* Notify the stack and then free the skb after we've unlocked */
+ skb_tstamp_tx(skb, &shhwtstamps);
+ dev_kfree_skb_any(skb);
}
/**
@@ -883,6 +919,7 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
break;
case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_NTP_ALL:
case HWTSTAMP_FILTER_ALL:
/* The X550 controller is capable of timestamping all packets,
* which allows it to accept any filter.
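Both the igb and ixgbe hwtstamp handlers now observe the same ordering invariant: snapshot the skb pointer, clear the driver's reference, release the IN_PROGRESS bit, and only then call skb_tstamp_tx(). An application woken by the notification can thus immediately request another timestamp without racing a still-held lock. Distilled into a hedged, driver-agnostic fragment:

	struct sk_buff *skb = priv->ptp_tx_skb;	/* local copy survives the reset */

	priv->ptp_tx_skb = NULL;
	clear_bit_unlock(PTP_TX_IN_PROGRESS, &priv->state);

	skb_tstamp_tx(skb, &shhwtstamps);	/* may wake the waiting application */
	dev_kfree_skb_any(skb);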
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 8baf298a8516..0760bd7eeb01 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -540,16 +540,15 @@ static s32 ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
case ixgbe_mbox_api_11:
case ixgbe_mbox_api_12:
case ixgbe_mbox_api_13:
- /*
- * Version 1.1 supports jumbo frames on VFs if PF has
+ /* Version 1.1 supports jumbo frames on VFs if PF has
* jumbo frames enabled which means legacy VFs are
* disabled
*/
if (pf_max_frame > ETH_FRAME_LEN)
break;
+ /* fall through */
default:
- /*
- * If the PF or VF are running w/ jumbo frames enabled
+ /* If the PF or VF are running w/ jumbo frames enabled
* we need to shut down the VF Rx path as we cannot
* support jumbo frames on legacy VFs
*/
@@ -778,11 +777,17 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
int vf, unsigned char *mac_addr)
{
+ s32 retval;
+
ixgbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf);
- memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr, ETH_ALEN);
- ixgbe_add_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf);
+ retval = ixgbe_add_mac_filter(adapter, mac_addr, vf);
+ if (retval >= 0)
+ memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr,
+ ETH_ALEN);
+ else
+ memset(adapter->vfinfo[vf].vf_mac_addresses, 0, ETH_ALEN);
- return 0;
+ return retval;
}
int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask)
@@ -813,7 +818,7 @@ static inline void ixgbe_write_qde(struct ixgbe_adapter *adapter, u32 vf,
IXGBE_WRITE_FLUSH(hw);
/* indicate to hardware that we want to set drop enable */
- reg = IXGBE_QDE_WRITE | IXGBE_QDE_ENABLE;
+ reg = IXGBE_QDE_WRITE | qde;
reg |= i << IXGBE_QDE_IDX_SHIFT;
IXGBE_WRITE_REG(hw, IXGBE_QDE, reg);
}
@@ -1347,27 +1352,49 @@ void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter)
int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ s32 retval;
if (vf >= adapter->num_vfs)
return -EINVAL;
- if (is_zero_ether_addr(mac)) {
- adapter->vfinfo[vf].pf_set_mac = false;
- dev_info(&adapter->pdev->dev, "removing MAC on VF %d\n", vf);
- } else if (is_valid_ether_addr(mac)) {
- adapter->vfinfo[vf].pf_set_mac = true;
+ if (is_valid_ether_addr(mac)) {
dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n",
mac, vf);
dev_info(&adapter->pdev->dev, "Reload the VF driver to make this change effective.");
- if (test_bit(__IXGBE_DOWN, &adapter->state)) {
- dev_warn(&adapter->pdev->dev, "The VF MAC address has been set, but the PF device is not up.\n");
- dev_warn(&adapter->pdev->dev, "Bring the PF device up before attempting to use the VF device.\n");
+
+ retval = ixgbe_set_vf_mac(adapter, vf, mac);
+ if (retval >= 0) {
+ adapter->vfinfo[vf].pf_set_mac = true;
+
+ if (test_bit(__IXGBE_DOWN, &adapter->state)) {
+ dev_warn(&adapter->pdev->dev, "The VF MAC address has been set, but the PF device is not up.\n");
+ dev_warn(&adapter->pdev->dev, "Bring the PF device up before attempting to use the VF device.\n");
+ }
+ } else {
+ dev_warn(&adapter->pdev->dev, "The VF MAC address was NOT set due to invalid or duplicate MAC address.\n");
+ }
+ } else if (is_zero_ether_addr(mac)) {
+ unsigned char *vf_mac_addr =
+ adapter->vfinfo[vf].vf_mac_addresses;
+
+ /* nothing to do */
+ if (is_zero_ether_addr(vf_mac_addr))
+ return 0;
+
+ dev_info(&adapter->pdev->dev, "removing MAC on VF %d\n", vf);
+
+ retval = ixgbe_del_mac_filter(adapter, vf_mac_addr, vf);
+ if (retval >= 0) {
+ adapter->vfinfo[vf].pf_set_mac = false;
+ memcpy(vf_mac_addr, mac, ETH_ALEN);
+ } else {
+ dev_warn(&adapter->pdev->dev, "Could NOT remove the VF MAC address.\n");
}
} else {
- return -EINVAL;
+ retval = -EINVAL;
}
- return ixgbe_set_vf_mac(adapter, vf, mac);
+ return retval;
}
static int ixgbe_enable_port_vlan(struct ixgbe_adapter *adapter, int vf,
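ixgbe_ndo_set_vf_mac() now implements the .ndo_set_vf_mac contract as a three-way dispatch and propagates the filter operation's status instead of returning 0 unconditionally: a valid unicast MAC installs a filter and sets pf_set_mac, an all-zero MAC removes the administratively assigned address, and anything else (multicast, broadcast, malformed) is rejected. Schematically, with the log messages elided (a hedged condensation of the hunk above, not a drop-in replacement):

	if (is_valid_ether_addr(mac))
		retval = ixgbe_set_vf_mac(adapter, vf, mac);
	else if (is_zero_ether_addr(mac))
		retval = ixgbe_del_mac_filter(adapter,
					      adapter->vfinfo[vf].vf_mac_addresses,
					      vf);
	else
		retval = -EINVAL;

	return retval;

Note that ixgbe_set_vf_mac() itself now adds the new filter first and only commits the cached address on success, so a duplicate or invalid MAC can no longer leave a stale entry in vfinfo.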
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
index 2ba024b575ea..72d84a065e34 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
@@ -1750,14 +1750,14 @@ ixgbe_setup_mac_link_sfp_n(struct ixgbe_hw *hw, ixgbe_link_speed speed,
if (ret_val == IXGBE_ERR_SFP_NOT_PRESENT)
return 0;
- if (!ret_val)
+ if (ret_val)
return ret_val;
/* Configure internal PHY for native SFI based on module type */
ret_val = hw->mac.ops.read_iosf_sb_reg(hw,
IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_phy_int);
- if (!ret_val)
+ if (ret_val)
return ret_val;
reg_phy_int &= IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_DA;
@@ -1767,7 +1767,7 @@ ixgbe_setup_mac_link_sfp_n(struct ixgbe_hw *hw, ixgbe_link_speed speed,
ret_val = hw->mac.ops.write_iosf_sb_reg(hw,
IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
IXGBE_SB_IOSF_TARGET_KR_PHY, reg_phy_int);
- if (!ret_val)
+ if (ret_val)
return ret_val;
/* Setup SFI internal link. */
@@ -1798,7 +1798,7 @@ ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw, ixgbe_link_speed speed,
if (ret_val == IXGBE_ERR_SFP_NOT_PRESENT)
return 0;
- if (!ret_val)
+ if (ret_val)
return ret_val;
/* Configure internal PHY for KR/KX. */
@@ -1807,16 +1807,16 @@ ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw, ixgbe_link_speed speed,
if (hw->phy.mdio.prtad == MDIO_PRTAD_NONE)
return IXGBE_ERR_PHY_ADDR_INVALID;
- /* Get external PHY device id */
- ret_val = hw->phy.ops.read_reg(hw, IXGBE_CS4227_GLOBAL_ID_MSB,
- IXGBE_MDIO_ZERO_DEV_TYPE, &reg_phy_ext);
+ /* Get external PHY SKU id */
+ ret_val = hw->phy.ops.read_reg(hw, IXGBE_CS4227_EFUSE_PDF_SKU,
+ IXGBE_MDIO_ZERO_DEV_TYPE, &reg_phy_ext);
if (ret_val)
return ret_val;
/* When configuring quad port CS4223, the MAC instance is part
* of the slice offset.
*/
- if (reg_phy_ext == IXGBE_CS4223_PHY_ID)
+ if (reg_phy_ext == IXGBE_CS4223_SKU_ID)
slice_offset = (hw->bus.lan_id +
(hw->bus.instance_id << 1)) << 12;
else
@@ -1824,12 +1824,28 @@ ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw, ixgbe_link_speed speed,
/* Configure CS4227/CS4223 LINE side to proper mode. */
reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB + slice_offset;
+
+ ret_val = hw->phy.ops.read_reg(hw, reg_slice,
+ IXGBE_MDIO_ZERO_DEV_TYPE, &reg_phy_ext);
+ if (ret_val)
+ return ret_val;
+
+ reg_phy_ext &= ~((IXGBE_CS4227_EDC_MODE_CX1 << 1) |
+ (IXGBE_CS4227_EDC_MODE_SR << 1));
+
if (setup_linear)
reg_phy_ext = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 1;
else
reg_phy_ext = (IXGBE_CS4227_EDC_MODE_SR << 1) | 1;
- return hw->phy.ops.write_reg(hw, reg_slice, IXGBE_MDIO_ZERO_DEV_TYPE,
- reg_phy_ext);
+
+ ret_val = hw->phy.ops.write_reg(hw, reg_slice,
+ IXGBE_MDIO_ZERO_DEV_TYPE, reg_phy_ext);
+ if (ret_val)
+ return ret_val;
+
+ /* Flush previous write with a read */
+ return hw->phy.ops.read_reg(hw, reg_slice,
+ IXGBE_MDIO_ZERO_DEV_TYPE, &reg_phy_ext);
}
/**
@@ -3206,6 +3222,7 @@ static s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
phy->ops.setup_link = NULL;
phy->ops.read_reg = NULL;
phy->ops.write_reg = NULL;
+ phy->ops.reset = NULL;
break;
default:
break;
@@ -3819,6 +3836,28 @@ static const struct ixgbe_mac_operations mac_ops_X550EM_x = {
.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550,
};
+static const struct ixgbe_mac_operations mac_ops_X550EM_x_fw = {
+ X550_COMMON_MAC
+ .led_on = NULL,
+ .led_off = NULL,
+ .init_led_link_act = NULL,
+ .reset_hw = &ixgbe_reset_hw_X550em,
+ .get_media_type = &ixgbe_get_media_type_X550em,
+ .get_san_mac_addr = NULL,
+ .get_wwn_prefix = NULL,
+ .setup_link = &ixgbe_setup_mac_link_X540,
+ .get_link_capabilities = &ixgbe_get_link_capabilities_X550em,
+ .get_bus_info = &ixgbe_get_bus_info_X550em,
+ .setup_sfp = ixgbe_setup_sfp_modules_X550em,
+ .acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X550em,
+ .release_swfw_sync = &ixgbe_release_swfw_sync_X550em,
+ .init_swfw_sync = &ixgbe_init_swfw_sync_X540,
+ .setup_fc = NULL,
+ .fc_autoneg = ixgbe_fc_autoneg,
+ .read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550,
+ .write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550,
+};
+
static struct ixgbe_mac_operations mac_ops_x550em_a = {
X550_COMMON_MAC
.led_on = ixgbe_led_on_t_x550em,
@@ -3986,7 +4025,7 @@ const struct ixgbe_info ixgbe_X550EM_x_info = {
const struct ixgbe_info ixgbe_x550em_x_fw_info = {
.mac = ixgbe_mac_X550EM_x,
.get_invariants = ixgbe_get_invariants_X550_x_fw,
- .mac_ops = &mac_ops_X550EM_x,
+ .mac_ops = &mac_ops_X550EM_x_fw,
.eeprom_ops = &eeprom_ops_X550EM_x,
.phy_ops = &phy_ops_x550em_x_fw,
.mbx_ops = &mbx_ops_generic,
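Several ixgbe_setup_mac_link_sfp_n()/_x550a() hunks fix an inverted status test: these s32 helpers return 0 on success and a non-zero code on failure, so "if (!ret_val) return ret_val;" bailed out precisely when the call succeeded and pressed on after errors. The corrected pattern, hedged (register and target names are placeholders):

	s32 ret_val;

	ret_val = hw->mac.ops.read_iosf_sb_reg(hw, reg, target, &val);
	if (ret_val)		/* non-zero status: propagate the error */
		return ret_val;
	/* success: 'val' is valid, continue */

The new mac_ops_X550EM_x_fw table takes the same defensive line as the rest of the series: hooks the firmware-managed variant cannot support (.led_on, .led_off, .init_led_link_act, .setup_fc, ...) are left NULL, which is why ixgbe_init_hw_generic() and ixgbe_set_phys_id() earlier in this diff now test the pointer before calling through it.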
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index eee29bddddc1..084c53582793 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -49,6 +49,7 @@
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/prefetch.h>
+#include <net/mpls.h>
#include "ixgbevf.h"
@@ -56,7 +57,7 @@ const char ixgbevf_driver_name[] = "ixgbevf";
static const char ixgbevf_driver_string[] =
"Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";
-#define DRV_VERSION "3.2.2-k"
+#define DRV_VERSION "4.1.0-k"
const char ixgbevf_driver_version[] = DRV_VERSION;
static char ixgbevf_copyright[] =
"Copyright (c) 2009 - 2015 Intel Corporation.";
@@ -1350,23 +1351,23 @@ static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+ unsigned int ri = 0, ti = 0;
int vector, err;
- int ri = 0, ti = 0;
for (vector = 0; vector < q_vectors; vector++) {
struct ixgbevf_q_vector *q_vector = adapter->q_vector[vector];
struct msix_entry *entry = &adapter->msix_entries[vector];
if (q_vector->tx.ring && q_vector->rx.ring) {
- snprintf(q_vector->name, sizeof(q_vector->name) - 1,
- "%s-%s-%d", netdev->name, "TxRx", ri++);
+ snprintf(q_vector->name, sizeof(q_vector->name),
+ "%s-TxRx-%u", netdev->name, ri++);
ti++;
} else if (q_vector->rx.ring) {
- snprintf(q_vector->name, sizeof(q_vector->name) - 1,
- "%s-%s-%d", netdev->name, "rx", ri++);
+ snprintf(q_vector->name, sizeof(q_vector->name),
+ "%s-rx-%u", netdev->name, ri++);
} else if (q_vector->tx.ring) {
- snprintf(q_vector->name, sizeof(q_vector->name) - 1,
- "%s-%s-%d", netdev->name, "tx", ti++);
+ snprintf(q_vector->name, sizeof(q_vector->name),
+ "%s-tx-%u", netdev->name, ti++);
} else {
/* skip this unused q_vector */
continue;
@@ -3321,7 +3322,10 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
if (err < 0)
return err;
- ip.hdr = skb_network_header(skb);
+ if (eth_p_mpls(first->protocol))
+ ip.hdr = skb_inner_network_header(skb);
+ else
+ ip.hdr = skb_network_header(skb);
l4.hdr = skb_checksum_start(skb);
/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
@@ -4075,7 +4079,11 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->features |= NETIF_F_HIGHDMA;
netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
- netdev->mpls_features |= NETIF_F_HW_CSUM;
+ netdev->mpls_features |= NETIF_F_SG |
+ NETIF_F_TSO |
+ NETIF_F_TSO6 |
+ NETIF_F_HW_CSUM;
+ netdev->mpls_features |= IXGBEVF_GSO_PARTIAL_FEATURES;
netdev->hw_enc_features |= netdev->vlan_features;
/* set this bit last since it cannot be part of vlan_features */
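For MPLS-encapsulated TSO frames the L3 header does not start at skb_network_header() — that offset points at the MPLS label stack — so both ixgbe and ixgbevf now take the inner network header instead, keyed off the eth_p_mpls() ethertype test from <net/mpls.h>. As a hedged one-liner (the helper name is illustrative):

	#include <net/mpls.h>

	static unsigned char *tso_l3_hdr(const struct sk_buff *skb, __be16 protocol)
	{
		/* MPLS: the IP header sits behind the label stack, at the
		 * inner network header offset recorded by the stack.
		 */
		return eth_p_mpls(protocol) ? skb_inner_network_header(skb)
					    : skb_network_header(skb);
	}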
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c
index b6d0c01eab10..0c25006ce9af 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.c
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
@@ -335,6 +335,7 @@ int ixgbevf_get_reta_locked(struct ixgbe_hw *hw, u32 *reta, int num_rx_queues)
case ixgbe_mbox_api_12:
if (hw->mac.type < ixgbe_mac_X550_vf)
break;
+ /* fall through */
default:
return -EOPNOTSUPP;
}
@@ -401,6 +402,7 @@ int ixgbevf_get_rss_key_locked(struct ixgbe_hw *hw, u8 *rss_key)
case ixgbe_mbox_api_12:
if (hw->mac.type < ixgbe_mac_X550_vf)
break;
+ /* fall through */
default:
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index f580b49e6b67..62d848df26ef 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -691,17 +691,6 @@ jme_enable_tx_engine(struct jme_adapter *jme)
}
static inline void
-jme_restart_tx_engine(struct jme_adapter *jme)
-{
- /*
- * Restart TX Engine
- */
- jwrite32(jme, JME_TXCS, jme->reg_txcs |
- TXCS_SELECT_QUEUE0 |
- TXCS_ENABLE);
-}
-
-static inline void
jme_disable_tx_engine(struct jme_adapter *jme)
{
int i;
@@ -2382,37 +2371,6 @@ jme_tx_timeout(struct net_device *netdev)
jme_reset_link(jme);
}
-static inline void jme_pause_rx(struct jme_adapter *jme)
-{
- atomic_dec(&jme->link_changing);
-
- jme_set_rx_pcc(jme, PCC_OFF);
- if (test_bit(JME_FLAG_POLL, &jme->flags)) {
- JME_NAPI_DISABLE(jme);
- } else {
- tasklet_disable(&jme->rxclean_task);
- tasklet_disable(&jme->rxempty_task);
- }
-}
-
-static inline void jme_resume_rx(struct jme_adapter *jme)
-{
- struct dynpcc_info *dpi = &(jme->dpi);
-
- if (test_bit(JME_FLAG_POLL, &jme->flags)) {
- JME_NAPI_ENABLE(jme);
- } else {
- tasklet_enable(&jme->rxclean_task);
- tasklet_enable(&jme->rxempty_task);
- }
- dpi->cur = PCC_P1;
- dpi->attempt = PCC_P1;
- dpi->cnt = 0;
- jme_set_rx_pcc(jme, PCC_P1);
-
- atomic_inc(&jme->link_changing);
-}
-
static void
jme_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
@@ -2652,12 +2610,11 @@ jme_get_link_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings *cmd)
{
struct jme_adapter *jme = netdev_priv(netdev);
- int rc;
spin_lock_bh(&jme->phy_lock);
- rc = mii_ethtool_get_link_ksettings(&jme->mii_if, cmd);
+ mii_ethtool_get_link_ksettings(&jme->mii_if, cmd);
spin_unlock_bh(&jme->phy_lock);
- return rc;
+ return 0;
}
static int
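This hunk — and the identical ones in korina and mv643xx_eth below — tracks an API change: mii_ethtool_get_link_ksettings() now returns void because it cannot fail, so callers drop the rc plumbing and return 0 themselves. The resulting shape of such a .get_link_ksettings op, hedged (the priv type and lock name are illustrative):

	static int xxx_get_link_ksettings(struct net_device *netdev,
					  struct ethtool_link_ksettings *cmd)
	{
		struct xxx_priv *priv = netdev_priv(netdev);

		spin_lock_bh(&priv->phy_lock);
		mii_ethtool_get_link_ksettings(&priv->mii_if, cmd);	/* void */
		spin_unlock_bh(&priv->phy_lock);

		return 0;
	}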
diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c
index 9fae98caf83a..3c0a6451273d 100644
--- a/drivers/net/ethernet/korina.c
+++ b/drivers/net/ethernet/korina.c
@@ -699,13 +699,12 @@ static int netdev_get_link_ksettings(struct net_device *dev,
struct ethtool_link_ksettings *cmd)
{
struct korina_private *lp = netdev_priv(dev);
- int rc;
spin_lock_irq(&lp->lock);
- rc = mii_ethtool_get_link_ksettings(&lp->mii_if, cmd);
+ mii_ethtool_get_link_ksettings(&lp->mii_if, cmd);
spin_unlock_irq(&lp->lock);
- return rc;
+ return 0;
}
static int netdev_set_link_ksettings(struct net_device *dev,
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 25642dee49d3..5794d98d946f 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -1501,10 +1501,9 @@ mv643xx_eth_get_link_ksettings_phy(struct mv643xx_eth_private *mp,
struct ethtool_link_ksettings *cmd)
{
struct net_device *dev = mp->dev;
- int err;
u32 supported, advertising;
- err = phy_ethtool_ksettings_get(dev->phydev, cmd);
+ phy_ethtool_ksettings_get(dev->phydev, cmd);
/*
* The MAC does not support 1000baseT_Half.
@@ -1520,7 +1519,7 @@ mv643xx_eth_get_link_ksettings_phy(struct mv643xx_eth_private *mp,
ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
advertising);
- return err;
+ return 0;
}
static int
diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c
index 90a60b98c28e..c9798210fa0f 100644
--- a/drivers/net/ethernet/marvell/mvmdio.c
+++ b/drivers/net/ethernet/marvell/mvmdio.c
@@ -17,41 +17,52 @@
* warranty of any kind, whether express or implied.
*/
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/mutex.h>
+#include <linux/of_device.h>
+#include <linux/of_mdio.h>
#include <linux/phy.h>
-#include <linux/interrupt.h>
#include <linux/platform_device.h>
-#include <linux/delay.h>
-#include <linux/io.h>
-#include <linux/clk.h>
-#include <linux/of_mdio.h>
#include <linux/sched.h>
#include <linux/wait.h>
-#define MVMDIO_SMI_DATA_SHIFT 0
-#define MVMDIO_SMI_PHY_ADDR_SHIFT 16
-#define MVMDIO_SMI_PHY_REG_SHIFT 21
-#define MVMDIO_SMI_READ_OPERATION BIT(26)
-#define MVMDIO_SMI_WRITE_OPERATION 0
-#define MVMDIO_SMI_READ_VALID BIT(27)
-#define MVMDIO_SMI_BUSY BIT(28)
-#define MVMDIO_ERR_INT_CAUSE 0x007C
-#define MVMDIO_ERR_INT_SMI_DONE 0x00000010
-#define MVMDIO_ERR_INT_MASK 0x0080
+#define MVMDIO_SMI_DATA_SHIFT 0
+#define MVMDIO_SMI_PHY_ADDR_SHIFT 16
+#define MVMDIO_SMI_PHY_REG_SHIFT 21
+#define MVMDIO_SMI_READ_OPERATION BIT(26)
+#define MVMDIO_SMI_WRITE_OPERATION 0
+#define MVMDIO_SMI_READ_VALID BIT(27)
+#define MVMDIO_SMI_BUSY BIT(28)
+#define MVMDIO_ERR_INT_CAUSE 0x007C
+#define MVMDIO_ERR_INT_SMI_DONE 0x00000010
+#define MVMDIO_ERR_INT_MASK 0x0080
+
+#define MVMDIO_XSMI_MGNT_REG 0x0
+#define MVMDIO_XSMI_PHYADDR_SHIFT 16
+#define MVMDIO_XSMI_DEVADDR_SHIFT 21
+#define MVMDIO_XSMI_WRITE_OPERATION (0x5 << 26)
+#define MVMDIO_XSMI_READ_OPERATION (0x7 << 26)
+#define MVMDIO_XSMI_READ_VALID BIT(29)
+#define MVMDIO_XSMI_BUSY BIT(30)
+#define MVMDIO_XSMI_ADDR_REG 0x8
/*
* SMI Timeout measurements:
* - Kirkwood 88F6281 (Globalscale Dreamplug): 45us to 95us (Interrupt)
* - Armada 370 (Globalscale Mirabox): 41us to 43us (Polled)
*/
-#define MVMDIO_SMI_TIMEOUT 1000 /* 1000us = 1ms */
-#define MVMDIO_SMI_POLL_INTERVAL_MIN 45
-#define MVMDIO_SMI_POLL_INTERVAL_MAX 55
+#define MVMDIO_SMI_TIMEOUT 1000 /* 1000us = 1ms */
+#define MVMDIO_SMI_POLL_INTERVAL_MIN 45
+#define MVMDIO_SMI_POLL_INTERVAL_MAX 55
+
+#define MVMDIO_XSMI_POLL_INTERVAL_MIN 150
+#define MVMDIO_XSMI_POLL_INTERVAL_MAX 160
struct orion_mdio_dev {
- struct mutex lock;
void __iomem *regs;
struct clk *clk[3];
/*
@@ -64,14 +75,21 @@ struct orion_mdio_dev {
wait_queue_head_t smi_busy_wait;
};
-static int orion_mdio_smi_is_done(struct orion_mdio_dev *dev)
-{
- return !(readl(dev->regs) & MVMDIO_SMI_BUSY);
-}
+enum orion_mdio_bus_type {
+ BUS_TYPE_SMI,
+ BUS_TYPE_XSMI
+};
+
+struct orion_mdio_ops {
+ int (*is_done)(struct orion_mdio_dev *);
+ unsigned int poll_interval_min;
+ unsigned int poll_interval_max;
+};
/* Wait for the SMI unit to be ready for another operation
*/
-static int orion_mdio_wait_ready(struct mii_bus *bus)
+static int orion_mdio_wait_ready(const struct orion_mdio_ops *ops,
+ struct mii_bus *bus)
{
struct orion_mdio_dev *dev = bus->priv;
unsigned long timeout = usecs_to_jiffies(MVMDIO_SMI_TIMEOUT);
@@ -79,14 +97,14 @@ static int orion_mdio_wait_ready(struct mii_bus *bus)
int timedout = 0;
while (1) {
- if (orion_mdio_smi_is_done(dev))
+ if (ops->is_done(dev))
return 0;
else if (timedout)
break;
if (dev->err_interrupt <= 0) {
- usleep_range(MVMDIO_SMI_POLL_INTERVAL_MIN,
- MVMDIO_SMI_POLL_INTERVAL_MAX);
+ usleep_range(ops->poll_interval_min,
+ ops->poll_interval_max);
if (time_is_before_jiffies(end))
++timedout;
@@ -98,8 +116,7 @@ static int orion_mdio_wait_ready(struct mii_bus *bus)
if (timeout < 2)
timeout = 2;
wait_event_timeout(dev->smi_busy_wait,
- orion_mdio_smi_is_done(dev),
- timeout);
+ ops->is_done(dev), timeout);
++timedout;
}
@@ -109,52 +126,61 @@ static int orion_mdio_wait_ready(struct mii_bus *bus)
return -ETIMEDOUT;
}
-static int orion_mdio_read(struct mii_bus *bus, int mii_id,
- int regnum)
+static int orion_mdio_smi_is_done(struct orion_mdio_dev *dev)
+{
+ return !(readl(dev->regs) & MVMDIO_SMI_BUSY);
+}
+
+static const struct orion_mdio_ops orion_mdio_smi_ops = {
+ .is_done = orion_mdio_smi_is_done,
+ .poll_interval_min = MVMDIO_SMI_POLL_INTERVAL_MIN,
+ .poll_interval_max = MVMDIO_SMI_POLL_INTERVAL_MAX,
+};
+
+static int orion_mdio_smi_read(struct mii_bus *bus, int mii_id,
+ int regnum)
{
struct orion_mdio_dev *dev = bus->priv;
u32 val;
int ret;
- mutex_lock(&dev->lock);
+ if (regnum & MII_ADDR_C45)
+ return -EOPNOTSUPP;
- ret = orion_mdio_wait_ready(bus);
+ ret = orion_mdio_wait_ready(&orion_mdio_smi_ops, bus);
if (ret < 0)
- goto out;
+ return ret;
writel(((mii_id << MVMDIO_SMI_PHY_ADDR_SHIFT) |
(regnum << MVMDIO_SMI_PHY_REG_SHIFT) |
MVMDIO_SMI_READ_OPERATION),
dev->regs);
- ret = orion_mdio_wait_ready(bus);
+ ret = orion_mdio_wait_ready(&orion_mdio_smi_ops, bus);
if (ret < 0)
- goto out;
+ return ret;
val = readl(dev->regs);
if (!(val & MVMDIO_SMI_READ_VALID)) {
dev_err(bus->parent, "SMI bus read not valid\n");
- ret = -ENODEV;
- goto out;
+ return -ENODEV;
}
- ret = val & 0xFFFF;
-out:
- mutex_unlock(&dev->lock);
- return ret;
+ return val & GENMASK(15, 0);
}
-static int orion_mdio_write(struct mii_bus *bus, int mii_id,
- int regnum, u16 value)
+static int orion_mdio_smi_write(struct mii_bus *bus, int mii_id,
+ int regnum, u16 value)
{
struct orion_mdio_dev *dev = bus->priv;
int ret;
- mutex_lock(&dev->lock);
+ if (regnum & MII_ADDR_C45)
+ return -EOPNOTSUPP;
- ret = orion_mdio_wait_ready(bus);
+ ret = orion_mdio_wait_ready(&orion_mdio_smi_ops, bus);
if (ret < 0)
- goto out;
+ return ret;
writel(((mii_id << MVMDIO_SMI_PHY_ADDR_SHIFT) |
(regnum << MVMDIO_SMI_PHY_REG_SHIFT) |
@@ -162,9 +188,74 @@ static int orion_mdio_write(struct mii_bus *bus, int mii_id,
(value << MVMDIO_SMI_DATA_SHIFT)),
dev->regs);
-out:
- mutex_unlock(&dev->lock);
- return ret;
+ return 0;
+}
+
+static int orion_mdio_xsmi_is_done(struct orion_mdio_dev *dev)
+{
+ return !(readl(dev->regs + MVMDIO_XSMI_MGNT_REG) & MVMDIO_XSMI_BUSY);
+}
+
+static const struct orion_mdio_ops orion_mdio_xsmi_ops = {
+ .is_done = orion_mdio_xsmi_is_done,
+ .poll_interval_min = MVMDIO_XSMI_POLL_INTERVAL_MIN,
+ .poll_interval_max = MVMDIO_XSMI_POLL_INTERVAL_MAX,
+};
+
+static int orion_mdio_xsmi_read(struct mii_bus *bus, int mii_id,
+ int regnum)
+{
+ struct orion_mdio_dev *dev = bus->priv;
+ u16 dev_addr = (regnum >> 16) & GENMASK(4, 0);
+ int ret;
+
+ if (!(regnum & MII_ADDR_C45))
+ return -EOPNOTSUPP;
+
+ ret = orion_mdio_wait_ready(&orion_mdio_xsmi_ops, bus);
+ if (ret < 0)
+ return ret;
+
+ writel(regnum & GENMASK(15, 0), dev->regs + MVMDIO_XSMI_ADDR_REG);
+ writel((mii_id << MVMDIO_XSMI_PHYADDR_SHIFT) |
+ (dev_addr << MVMDIO_XSMI_DEVADDR_SHIFT) |
+ MVMDIO_XSMI_READ_OPERATION,
+ dev->regs + MVMDIO_XSMI_MGNT_REG);
+
+ ret = orion_mdio_wait_ready(&orion_mdio_xsmi_ops, bus);
+ if (ret < 0)
+ return ret;
+
+ if (!(readl(dev->regs + MVMDIO_XSMI_MGNT_REG) &
+ MVMDIO_XSMI_READ_VALID)) {
+ dev_err(bus->parent, "XSMI bus read not valid\n");
+ return -ENODEV;
+ }
+
+ return readl(dev->regs + MVMDIO_XSMI_MGNT_REG) & GENMASK(15, 0);
+}
+
+static int orion_mdio_xsmi_write(struct mii_bus *bus, int mii_id,
+ int regnum, u16 value)
+{
+ struct orion_mdio_dev *dev = bus->priv;
+ u16 dev_addr = (regnum >> 16) & GENMASK(4, 0);
+ int ret;
+
+ if (!(regnum & MII_ADDR_C45))
+ return -EOPNOTSUPP;
+
+ ret = orion_mdio_wait_ready(&orion_mdio_xsmi_ops, bus);
+ if (ret < 0)
+ return ret;
+
+ writel(regnum & GENMASK(15, 0), dev->regs + MVMDIO_XSMI_ADDR_REG);
+ writel((mii_id << MVMDIO_XSMI_PHYADDR_SHIFT) |
+ (dev_addr << MVMDIO_XSMI_DEVADDR_SHIFT) |
+ MVMDIO_XSMI_WRITE_OPERATION | value,
+ dev->regs + MVMDIO_XSMI_MGNT_REG);
+
+ return 0;
}
static irqreturn_t orion_mdio_err_irq(int irq, void *dev_id)
@@ -184,11 +275,14 @@ static irqreturn_t orion_mdio_err_irq(int irq, void *dev_id)
static int orion_mdio_probe(struct platform_device *pdev)
{
+ enum orion_mdio_bus_type type;
struct resource *r;
struct mii_bus *bus;
struct orion_mdio_dev *dev;
int i, ret;
+ type = (enum orion_mdio_bus_type)of_device_get_match_data(&pdev->dev);
+
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!r) {
dev_err(&pdev->dev, "No SMI register address given\n");
@@ -200,9 +294,18 @@ static int orion_mdio_probe(struct platform_device *pdev)
if (!bus)
return -ENOMEM;
+ switch (type) {
+ case BUS_TYPE_SMI:
+ bus->read = orion_mdio_smi_read;
+ bus->write = orion_mdio_smi_write;
+ break;
+ case BUS_TYPE_XSMI:
+ bus->read = orion_mdio_xsmi_read;
+ bus->write = orion_mdio_xsmi_write;
+ break;
+ }
+
bus->name = "orion_mdio_bus";
- bus->read = orion_mdio_read;
- bus->write = orion_mdio_write;
snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii",
dev_name(&pdev->dev));
bus->parent = &pdev->dev;
@@ -244,8 +347,6 @@ static int orion_mdio_probe(struct platform_device *pdev)
return -EPROBE_DEFER;
}
- mutex_init(&dev->lock);
-
if (pdev->dev.of_node)
ret = of_mdiobus_register(bus, pdev->dev.of_node);
else
@@ -294,7 +395,8 @@ static int orion_mdio_remove(struct platform_device *pdev)
}
static const struct of_device_id orion_mdio_match[] = {
- { .compatible = "marvell,orion-mdio" },
+ { .compatible = "marvell,orion-mdio", .data = (void *)BUS_TYPE_SMI },
+ { .compatible = "marvell,xmdio", .data = (void *)BUS_TYPE_XSMI },
{ }
};
MODULE_DEVICE_TABLE(of, orion_mdio_match);
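The new XSMI callbacks implement IEEE 802.3 Clause 45 addressing. The MDIO core flags a C45 access by setting MII_ADDR_C45 (bit 30) in regnum, packing the MMD device address into bits 20:16 and the 16-bit register into bits 15:0 — hence the GENMASK() extractions above, and hence the Clause 22 SMI callbacks now explicitly reject C45 requests. A hedged decode sketch (the helper is illustrative):

	#include <linux/mdio.h>		/* MII_ADDR_C45 */

	static void decode_c45_regnum(int regnum, u16 *devad, u16 *reg)
	{
		WARN_ON(!(regnum & MII_ADDR_C45));
		*devad = (regnum >> 16) & GENMASK(4, 0);	/* MMD, bits 20:16 */
		*reg = regnum & GENMASK(15, 0);			/* register, bits 15:0 */
	}

The driver-private mutex disappears in the same patch; the MDIO core already serializes bus accesses through the mii_bus's own mdio_lock, so the second lock was redundant.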
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index d297011b535d..0aab74c2a209 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -1976,9 +1976,8 @@ err_drop_frame:
MVNETA_MH_SIZE + NET_SKB_PAD,
rx_bytes,
DMA_FROM_DEVICE);
- memcpy(skb_put(skb, rx_bytes),
- data + MVNETA_MH_SIZE + NET_SKB_PAD,
- rx_bytes);
+ skb_put_data(skb, data + MVNETA_MH_SIZE + NET_SKB_PAD,
+ rx_bytes);
skb->protocol = eth_type_trans(skb, dev);
mvneta_rx_csum(pp, rx_status, skb);
@@ -2103,9 +2102,8 @@ err_drop_frame:
MVNETA_MH_SIZE + NET_SKB_PAD,
rx_bytes,
DMA_FROM_DEVICE);
- memcpy(skb_put(skb, rx_bytes),
- data + MVNETA_MH_SIZE + NET_SKB_PAD,
- rx_bytes);
+ skb_put_data(skb, data + MVNETA_MH_SIZE + NET_SKB_PAD,
+ rx_bytes);
skb->protocol = eth_type_trans(skb, dev);
mvneta_rx_csum(pp, rx_status, skb);
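skb_put_data() is a convenience wrapper that collapses the memcpy(skb_put(...), ...) pattern removed above. A hedged sketch of the equivalence (the helper name is illustrative):

	static void rx_copybreak(struct sk_buff *skb, const void *data,
				 unsigned int len)
	{
		/* old idiom: memcpy(skb_put(skb, len), data, len); */
		skb_put_data(skb, data, len);	/* same effect; returns the
						 * write location like skb_put()
						 */
	}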
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index 33c901622ed5..48d21c1e09f2 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -345,9 +345,15 @@
/* Per-port XGMAC registers. PPv2.2 only, only for GOP port 0,
* relative to port->base.
*/
+#define MVPP22_XLG_CTRL0_REG 0x100
+#define MVPP22_XLG_CTRL0_PORT_EN BIT(0)
+#define MVPP22_XLG_CTRL0_MAC_RESET_DIS BIT(1)
+#define MVPP22_XLG_CTRL0_MIB_CNT_DIS BIT(14)
+
#define MVPP22_XLG_CTRL3_REG 0x11c
#define MVPP22_XLG_CTRL3_MACMODESELECT_MASK (7 << 13)
#define MVPP22_XLG_CTRL3_MACMODESELECT_GMAC (0 << 13)
+#define MVPP22_XLG_CTRL3_MACMODESELECT_10G (1 << 13)
/* SMI registers. PPv2.2 only, relative to priv->iface_base. */
#define MVPP22_SMI_MISC_CFG_REG 0x1204
@@ -3911,17 +3917,6 @@ static void *mvpp2_buf_alloc(struct mvpp2_port *port,
return data;
}
-/* Set pool number in a BM cookie */
-static inline u32 mvpp2_bm_cookie_pool_set(u32 cookie, int pool)
-{
- u32 bm;
-
- bm = cookie & ~(0xFF << MVPP2_BM_COOKIE_POOL_OFFS);
- bm |= ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS);
-
- return bm;
-}
-
/* Release buffer to BM */
static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
dma_addr_t buf_dma_addr,
@@ -3958,14 +3953,6 @@ static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
put_cpu();
}
-/* Refill BM pool */
-static void mvpp2_pool_refill(struct mvpp2_port *port, int pool,
- dma_addr_t dma_addr,
- phys_addr_t phys_addr)
-{
- mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
-}
-
/* Allocate buffers for the pool */
static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
struct mvpp2_bm_pool *bm_pool, int buf_num)
@@ -4156,7 +4143,10 @@ static inline void mvpp2_interrupts_disable(struct mvpp2_port *port)
MVPP2_ISR_DISABLE_INTERRUPT(cpu_mask));
}
-/* Mask the current CPU's Rx/Tx interrupts */
+/* Mask the current CPU's Rx/Tx interrupts.
+ * Called by on_each_cpu(), guaranteed to run with migration disabled,
+ * so using smp_processor_id() is OK.
+ */
static void mvpp2_interrupts_mask(void *arg)
{
struct mvpp2_port *port = arg;
@@ -4165,7 +4155,10 @@ static void mvpp2_interrupts_mask(void *arg)
MVPP2_ISR_RX_TX_MASK_REG(port->id), 0);
}
-/* Unmask the current CPU's Rx/Tx interrupts */
+/* Unmask the current CPU's Rx/Tx interrupts.
+ * Called by on_each_cpu(), guaranteed to run with migration disabled,
+ * so using smp_processor_id() is OK.
+ */
static void mvpp2_interrupts_unmask(void *arg)
{
struct mvpp2_port *port = arg;
@@ -4186,7 +4179,13 @@ static void mvpp22_port_mii_set(struct mvpp2_port *port)
if (port->gop_id == 0) {
val = readl(port->base + MVPP22_XLG_CTRL3_REG);
val &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK;
- val |= MVPP22_XLG_CTRL3_MACMODESELECT_GMAC;
+
+ if (port->phy_interface == PHY_INTERFACE_MODE_XAUI ||
+ port->phy_interface == PHY_INTERFACE_MODE_10GKR)
+ val |= MVPP22_XLG_CTRL3_MACMODESELECT_10G;
+ else
+ val |= MVPP22_XLG_CTRL3_MACMODESELECT_GMAC;
+
writel(val, port->base + MVPP22_XLG_CTRL3_REG);
}
@@ -4236,19 +4235,40 @@ static void mvpp2_port_enable(struct mvpp2_port *port)
{
u32 val;
- val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
- val |= MVPP2_GMAC_PORT_EN_MASK;
- val |= MVPP2_GMAC_MIB_CNTR_EN_MASK;
- writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
+ /* Only GOP port 0 has an XLG MAC */
+ if (port->gop_id == 0 &&
+ (port->phy_interface == PHY_INTERFACE_MODE_XAUI ||
+ port->phy_interface == PHY_INTERFACE_MODE_10GKR)) {
+ val = readl(port->base + MVPP22_XLG_CTRL0_REG);
+ val |= MVPP22_XLG_CTRL0_PORT_EN |
+ MVPP22_XLG_CTRL0_MAC_RESET_DIS;
+ val &= ~MVPP22_XLG_CTRL0_MIB_CNT_DIS;
+ writel(val, port->base + MVPP22_XLG_CTRL0_REG);
+ } else {
+ val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
+ val |= MVPP2_GMAC_PORT_EN_MASK;
+ val |= MVPP2_GMAC_MIB_CNTR_EN_MASK;
+ writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
+ }
}
static void mvpp2_port_disable(struct mvpp2_port *port)
{
u32 val;
- val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
- val &= ~(MVPP2_GMAC_PORT_EN_MASK);
- writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
+ /* Only GOP port 0 has an XLG MAC */
+ if (port->gop_id == 0 &&
+ (port->phy_interface == PHY_INTERFACE_MODE_XAUI ||
+ port->phy_interface == PHY_INTERFACE_MODE_10GKR)) {
+ val = readl(port->base + MVPP22_XLG_CTRL0_REG);
+ val &= ~(MVPP22_XLG_CTRL0_PORT_EN |
+ MVPP22_XLG_CTRL0_MAC_RESET_DIS);
+ writel(val, port->base + MVPP22_XLG_CTRL0_REG);
+ } else {
+ val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
+ val &= ~(MVPP2_GMAC_PORT_EN_MASK);
+ writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
+ }
}
/* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */
@@ -4521,7 +4541,11 @@ mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq)
return txq->descs + tx_desc;
}
-/* Update HW with number of aggregated Tx descriptors to be sent */
+/* Update HW with number of aggregated Tx descriptors to be sent
+ *
+ * Called only from mvpp2_tx(), so migration is disabled, using
+ * smp_processor_id() is OK.
+ */
static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending)
{
/* aggregated access - relevant TXQ number is written in TX desc */
@@ -4532,6 +4556,9 @@ static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending)
/* Check if there are enough free descriptors in aggregated txq.
* If not, update the number of occupied descriptors and repeat the check.
+ *
+ * Called only from mvpp2_tx(), so migration is disabled and using
+ * smp_processor_id() is OK.
*/
static int mvpp2_aggr_desc_num_check(struct mvpp2 *priv,
struct mvpp2_tx_queue *aggr_txq, int num)
@@ -4550,7 +4577,12 @@ static int mvpp2_aggr_desc_num_check(struct mvpp2 *priv,
return 0;
}
-/* Reserved Tx descriptors allocation request */
+/* Reserved Tx descriptors allocation request
+ *
+ * Called only from mvpp2_txq_reserved_desc_num_proc(), itself called
+ * only by mvpp2_tx(), so migration is disabled and using
+ * smp_processor_id() is OK.
+ */
static int mvpp2_txq_alloc_reserved_desc(struct mvpp2 *priv,
struct mvpp2_tx_queue *txq, int num)
{
@@ -4654,6 +4686,10 @@ static u32 mvpp2_txq_desc_csum(int l3_offs, int l3_proto,
/* Get number of sent descriptors and decrement counter.
* The number of sent descriptors is returned.
* Per-CPU access
+ *
+ * Called only from mvpp2_txq_done(), which is itself called from
+ * mvpp2_tx() (migration disabled) and from the TX completion tasklet
+ * (migration disabled), so using smp_processor_id() is OK.
*/
static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port,
struct mvpp2_tx_queue *txq)
@@ -4668,6 +4704,9 @@ static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port,
MVPP2_TRANSMITTED_COUNT_OFFSET;
}
+/* Called through on_each_cpu(), so it runs on all CPUs with migration
+ * disabled; using smp_processor_id() is therefore OK.
+ */
static void mvpp2_txq_sent_counter_clear(void *arg)
{
struct mvpp2_port *port = arg;
@@ -4968,7 +5007,7 @@ static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port,
pool = (status & MVPP2_RXD_BM_POOL_ID_MASK) >>
MVPP2_RXD_BM_POOL_ID_OFFS;
- mvpp2_pool_refill(port, pool,
+ mvpp2_bm_pool_put(port, pool,
mvpp2_rxdesc_dma_addr_get(port, rx_desc),
mvpp2_rxdesc_cookie_get(port, rx_desc));
}
@@ -5422,7 +5461,7 @@ static int mvpp2_rx_refill(struct mvpp2_port *port,
if (!buf)
return -ENOMEM;
- mvpp2_pool_refill(port, pool, dma_addr, phys_addr);
+ mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
return 0;
}
@@ -5506,7 +5545,7 @@ err_drop_frame:
dev->stats.rx_errors++;
mvpp2_rx_error(port, rx_desc);
/* Return the buffer to the pool */
- mvpp2_pool_refill(port, pool, dma_addr, phys_addr);
+ mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
continue;
}
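The mvpp2 enable/disable and MII-mode hunks all branch on one predicate: only GOP port 0 carries the XLG (10G) MAC, and it is selected only for XAUI and 10GBASE-KR interfaces; every other port/mode combination stays on the GMAC path. Factored into a hedged helper that the driver above open-codes:

	static bool mvpp2_port_uses_xlg(const struct mvpp2_port *port)
	{
		/* Only GOP port 0 has an XLG MAC, and only 10G modes use it */
		return port->gop_id == 0 &&
		       (port->phy_interface == PHY_INTERFACE_MODE_XAUI ||
			port->phy_interface == PHY_INTERFACE_MODE_10GKR);
	}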
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 16f97552ae98..41a5c5d2ac89 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -221,6 +221,9 @@ static void mtk_phy_link_adjust(struct net_device *dev)
netif_carrier_on(dev);
else
netif_carrier_off(dev);
+
+ if (!of_phy_is_fixed_link(mac->of_node))
+ phy_print_status(dev->phydev);
}
static int mtk_phy_connect_node(struct mtk_eth *eth, struct mtk_mac *mac,
@@ -369,28 +372,48 @@ static void mtk_mdio_cleanup(struct mtk_eth *eth)
mdiobus_unregister(eth->mii_bus);
}
-static inline void mtk_irq_disable(struct mtk_eth *eth,
- unsigned reg, u32 mask)
+static inline void mtk_tx_irq_disable(struct mtk_eth *eth, u32 mask)
+{
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&eth->tx_irq_lock, flags);
+ val = mtk_r32(eth, MTK_QDMA_INT_MASK);
+ mtk_w32(eth, val & ~mask, MTK_QDMA_INT_MASK);
+ spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
+}
+
+static inline void mtk_tx_irq_enable(struct mtk_eth *eth, u32 mask)
+{
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&eth->tx_irq_lock, flags);
+ val = mtk_r32(eth, MTK_QDMA_INT_MASK);
+ mtk_w32(eth, val | mask, MTK_QDMA_INT_MASK);
+ spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
+}
+
+static inline void mtk_rx_irq_disable(struct mtk_eth *eth, u32 mask)
{
unsigned long flags;
u32 val;
- spin_lock_irqsave(&eth->irq_lock, flags);
- val = mtk_r32(eth, reg);
- mtk_w32(eth, val & ~mask, reg);
- spin_unlock_irqrestore(&eth->irq_lock, flags);
+ spin_lock_irqsave(&eth->rx_irq_lock, flags);
+ val = mtk_r32(eth, MTK_PDMA_INT_MASK);
+ mtk_w32(eth, val & ~mask, MTK_PDMA_INT_MASK);
+ spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
}
-static inline void mtk_irq_enable(struct mtk_eth *eth,
- unsigned reg, u32 mask)
+static inline void mtk_rx_irq_enable(struct mtk_eth *eth, u32 mask)
{
unsigned long flags;
u32 val;
- spin_lock_irqsave(&eth->irq_lock, flags);
- val = mtk_r32(eth, reg);
- mtk_w32(eth, val | mask, reg);
- spin_unlock_irqrestore(&eth->irq_lock, flags);
+ spin_lock_irqsave(&eth->rx_irq_lock, flags);
+ val = mtk_r32(eth, MTK_PDMA_INT_MASK);
+ mtk_w32(eth, val | mask, MTK_PDMA_INT_MASK);
+ spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
}
static int mtk_set_mac_address(struct net_device *dev, void *p)
@@ -470,9 +493,9 @@ static void mtk_get_stats64(struct net_device *dev,
unsigned int start;
if (netif_running(dev) && netif_device_present(dev)) {
- if (spin_trylock(&hw_stats->stats_lock)) {
+ if (spin_trylock_bh(&hw_stats->stats_lock)) {
mtk_stats_update_mac(mac);
- spin_unlock(&hw_stats->stats_lock);
+ spin_unlock_bh(&hw_stats->stats_lock);
}
}
@@ -969,6 +992,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
RX_DMA_VID(trxd.rxd3))
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
RX_DMA_VID(trxd.rxd3));
+ skb_record_rx_queue(skb, 0);
napi_gro_receive(napi, skb);
ring->data[idx] = new_data;
@@ -1098,7 +1122,7 @@ static int mtk_napi_tx(struct napi_struct *napi, int budget)
return budget;
napi_complete(napi);
- mtk_irq_enable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT);
+ mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
return tx_done;
}
@@ -1132,7 +1156,7 @@ poll_again:
goto poll_again;
}
napi_complete(napi);
- mtk_irq_enable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT);
+ mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);
return rx_done + budget - remain_budget;
}
@@ -1667,7 +1691,7 @@ static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth)
if (likely(napi_schedule_prep(&eth->rx_napi))) {
__napi_schedule(&eth->rx_napi);
- mtk_irq_disable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT);
+ mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
}
return IRQ_HANDLED;
@@ -1679,7 +1703,7 @@ static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth)
if (likely(napi_schedule_prep(&eth->tx_napi))) {
__napi_schedule(&eth->tx_napi);
- mtk_irq_disable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT);
+ mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
}
return IRQ_HANDLED;
@@ -1691,11 +1715,11 @@ static void mtk_poll_controller(struct net_device *dev)
struct mtk_mac *mac = netdev_priv(dev);
struct mtk_eth *eth = mac->hw;
- mtk_irq_disable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT);
- mtk_irq_disable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT);
+ mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
+ mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
mtk_handle_irq_rx(eth->irq[2], dev);
- mtk_irq_enable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT);
- mtk_irq_enable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT);
+ mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
+ mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);
}
#endif
@@ -1736,8 +1760,8 @@ static int mtk_open(struct net_device *dev)
napi_enable(&eth->tx_napi);
napi_enable(&eth->rx_napi);
- mtk_irq_enable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT);
- mtk_irq_enable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT);
+ mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
+ mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);
}
atomic_inc(&eth->dma_refcnt);
@@ -1782,8 +1806,8 @@ static int mtk_stop(struct net_device *dev)
if (!atomic_dec_and_test(&eth->dma_refcnt))
return 0;
- mtk_irq_disable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT);
- mtk_irq_disable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT);
+ mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
+ mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
napi_disable(&eth->tx_napi);
napi_disable(&eth->rx_napi);
@@ -1858,11 +1882,13 @@ static int mtk_hw_init(struct mtk_eth *eth)
/* Enable RX VLAN offloading */
mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
+ /* enable interrupt delay for RX */
+ mtk_w32(eth, MTK_PDMA_DELAY_RX_DELAY, MTK_PDMA_DELAY_INT);
+
/* disable delay and normal interrupt */
mtk_w32(eth, 0, MTK_QDMA_DELAY_INT);
- mtk_w32(eth, 0, MTK_PDMA_DELAY_INT);
- mtk_irq_disable(eth, MTK_QDMA_INT_MASK, ~0);
- mtk_irq_disable(eth, MTK_PDMA_INT_MASK, ~0);
+ mtk_tx_irq_disable(eth, ~0);
+ mtk_rx_irq_disable(eth, ~0);
mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
mtk_w32(eth, 0, MTK_RST_GL);
@@ -1933,8 +1959,8 @@ static void mtk_uninit(struct net_device *dev)
phy_disconnect(dev->phydev);
if (of_phy_is_fixed_link(mac->of_node))
of_phy_deregister_fixed_link(mac->of_node);
- mtk_irq_disable(eth, MTK_QDMA_INT_MASK, ~0);
- mtk_irq_disable(eth, MTK_PDMA_INT_MASK, ~0);
+ mtk_tx_irq_disable(eth, ~0);
+ mtk_rx_irq_disable(eth, ~0);
}
static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
@@ -2056,7 +2082,9 @@ static int mtk_get_link_ksettings(struct net_device *ndev,
if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
return -EBUSY;
- return phy_ethtool_ksettings_get(ndev->phydev, cmd);
+ phy_ethtool_ksettings_get(ndev->phydev, cmd);
+
+ return 0;
}
static int mtk_set_link_ksettings(struct net_device *ndev,
@@ -2156,9 +2184,9 @@ static void mtk_get_ethtool_stats(struct net_device *dev,
return;
if (netif_running(dev) && netif_device_present(dev)) {
- if (spin_trylock(&hwstats->stats_lock)) {
+ if (spin_trylock_bh(&hwstats->stats_lock)) {
mtk_stats_update_mac(mac);
- spin_unlock(&hwstats->stats_lock);
+ spin_unlock_bh(&hwstats->stats_lock);
}
}
@@ -2392,7 +2420,8 @@ static int mtk_probe(struct platform_device *pdev)
return PTR_ERR(eth->base);
spin_lock_init(&eth->page_lock);
- spin_lock_init(&eth->irq_lock);
+ spin_lock_init(&eth->tx_irq_lock);
+ spin_lock_init(&eth->rx_irq_lock);
eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
"mediatek,ethsys");
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index 3c46a3b613b9..5868a09f623a 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -125,7 +125,14 @@
#define MTK_PST_DRX_IDX_CFG(x) (MTK_PST_DRX_IDX0 << (x))
/* PDMA Delay Interrupt Register */
-#define MTK_PDMA_DELAY_INT 0xa0c
+#define MTK_PDMA_DELAY_INT 0xa0c
+#define MTK_PDMA_DELAY_RX_EN BIT(15)
+#define MTK_PDMA_DELAY_RX_PINT 4
+#define MTK_PDMA_DELAY_RX_PINT_SHIFT 8
+#define MTK_PDMA_DELAY_RX_PTIME 4
+#define MTK_PDMA_DELAY_RX_DELAY \
+ (MTK_PDMA_DELAY_RX_EN | MTK_PDMA_DELAY_RX_PTIME | \
+ (MTK_PDMA_DELAY_RX_PINT << MTK_PDMA_DELAY_RX_PINT_SHIFT))
/* PDMA Interrupt Status Register */
#define MTK_PDMA_INT_STATUS 0xa20
@@ -206,6 +213,7 @@
/* QDMA Interrupt Status Register */
#define MTK_QMTK_INT_STATUS 0x1A18
+#define MTK_RX_DONE_DLY BIT(30)
#define MTK_RX_DONE_INT3 BIT(19)
#define MTK_RX_DONE_INT2 BIT(18)
#define MTK_RX_DONE_INT1 BIT(17)
@@ -214,8 +222,7 @@
#define MTK_TX_DONE_INT2 BIT(2)
#define MTK_TX_DONE_INT1 BIT(1)
#define MTK_TX_DONE_INT0 BIT(0)
-#define MTK_RX_DONE_INT (MTK_RX_DONE_INT0 | MTK_RX_DONE_INT1 | \
- MTK_RX_DONE_INT2 | MTK_RX_DONE_INT3)
+#define MTK_RX_DONE_INT MTK_RX_DONE_DLY
#define MTK_TX_DONE_INT (MTK_TX_DONE_INT0 | MTK_TX_DONE_INT1 | \
MTK_TX_DONE_INT2 | MTK_TX_DONE_INT3)
@@ -512,6 +519,8 @@ struct mtk_rx_ring {
* @dev: The device pointer
* @base: The mapped register i/o base
* @page_lock: Make sure that register operations are atomic
+ * @tx_irq_lock: Make sure that TX IRQ register operations are atomic
+ * @rx_irq_lock: Make sure that RX IRQ register operations are atomic
* @dummy_dev: we run 2 netdevs on 1 physical DMA ring and need a
* dummy for NAPI to work
* @netdev: The netdev instances
@@ -540,7 +549,8 @@ struct mtk_eth {
struct device *dev;
void __iomem *base;
spinlock_t page_lock;
- spinlock_t irq_lock;
+ spinlock_t tx_irq_lock;
+ spinlock_t rx_irq_lock;
struct net_device dummy_dev;
struct net_device *netdev[MTK_MAX_DEVS];
struct mtk_mac *mac[MTK_MAX_DEVS];
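For reference, the MTK_PDMA_DELAY_RX_DELAY word composed above can be sanity-checked with a small standalone sketch (userspace C, illustrative only; the packet-count and time-unit granularities are hardware-defined and assumed here, not taken from a datasheet):

    #include <stdio.h>

    #define BIT(n)                       (1u << (n))
    /* mirrors the MTK_PDMA_DELAY_* values from mtk_eth_soc.h above */
    #define MTK_PDMA_DELAY_RX_EN         BIT(15)
    #define MTK_PDMA_DELAY_RX_PINT       4
    #define MTK_PDMA_DELAY_RX_PINT_SHIFT 8
    #define MTK_PDMA_DELAY_RX_PTIME      4

    int main(void)
    {
        unsigned int delay = MTK_PDMA_DELAY_RX_EN | MTK_PDMA_DELAY_RX_PTIME |
                             (MTK_PDMA_DELAY_RX_PINT << MTK_PDMA_DELAY_RX_PINT_SHIFT);

        /* prints 0x8404: delayed RX interrupts enabled, coalescing up to
         * 4 packets or 4 hardware time units */
        printf("MTK_PDMA_DELAY_RX_DELAY = 0x%04x\n", delay);
        return 0;
    }

This ties in with the MTK_RX_DONE_INT redefinition below: once delayed interrupts are enabled, the delayed RX-done bit is the one that fires.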
diff --git a/drivers/net/ethernet/mellanox/Kconfig b/drivers/net/ethernet/mellanox/Kconfig
index d54701047401..84a200764111 100644
--- a/drivers/net/ethernet/mellanox/Kconfig
+++ b/drivers/net/ethernet/mellanox/Kconfig
@@ -19,5 +19,6 @@ if NET_VENDOR_MELLANOX
source "drivers/net/ethernet/mellanox/mlx4/Kconfig"
source "drivers/net/ethernet/mellanox/mlx5/core/Kconfig"
source "drivers/net/ethernet/mellanox/mlxsw/Kconfig"
+source "drivers/net/ethernet/mellanox/mlxfw/Kconfig"
endif # NET_VENDOR_MELLANOX
diff --git a/drivers/net/ethernet/mellanox/Makefile b/drivers/net/ethernet/mellanox/Makefile
index 2e2a5ec509ac..016aa263bc04 100644
--- a/drivers/net/ethernet/mellanox/Makefile
+++ b/drivers/net/ethernet/mellanox/Makefile
@@ -5,3 +5,4 @@
obj-$(CONFIG_MLX4_CORE) += mlx4/
obj-$(CONFIG_MLX5_CORE) += mlx5/core/
obj-$(CONFIG_MLXSW_CORE) += mlxsw/
+obj-$(CONFIG_MLXFW) += mlxfw/
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index c1af47e45d3f..674773b28b2e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -3280,7 +3280,7 @@ int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_stat
if (mlx4_master_immediate_activate_vlan_qos(priv, slave, port))
mlx4_dbg(dev,
- "updating vf %d port %d no link state HW enforcment\n",
+ "updating vf %d port %d no link state HW enforcement\n",
vf, port);
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index 09dd3776db76..85fe17e4dcfb 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -146,16 +146,25 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
if (err)
goto free_eq;
- cq->mcq.comp = cq->type != RX ? mlx4_en_tx_irq : mlx4_en_rx_irq;
cq->mcq.event = mlx4_en_cq_event;
- if (cq->type != RX)
+ switch (cq->type) {
+ case TX:
+ cq->mcq.comp = mlx4_en_tx_irq;
netif_tx_napi_add(cq->dev, &cq->napi, mlx4_en_poll_tx_cq,
NAPI_POLL_WEIGHT);
- else
+ napi_enable(&cq->napi);
+ break;
+ case RX:
+ cq->mcq.comp = mlx4_en_rx_irq;
netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq, 64);
-
- napi_enable(&cq->napi);
+ napi_enable(&cq->napi);
+ break;
+ case TX_XDP:
+ /* nothing regarding napi, it's shared with rx ring */
+ cq->xdp_busy = false;
+ break;
+ }
return 0;
@@ -184,8 +193,10 @@ void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq)
void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
{
- napi_disable(&cq->napi);
- netif_napi_del(&cq->napi);
+ if (cq->type != TX_XDP) {
+ napi_disable(&cq->napi);
+ netif_napi_del(&cq->napi);
+ }
mlx4_cq_free(priv->mdev->dev, &cq->mcq);
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
index 1dae8e40fb25..5f41dc92aa68 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
@@ -238,7 +238,7 @@ static u8 mlx4_en_dcbnl_set_state(struct net_device *dev, u8 state)
priv->flags &= ~MLX4_EN_FLAG_DCB_ENABLED;
}
- if (mlx4_en_setup_tc(dev, num_tcs))
+ if (mlx4_en_alloc_tx_queue_per_tc(dev, num_tcs))
return 1;
return 0;
@@ -303,7 +303,7 @@ static int mlx4_en_ets_validate(struct mlx4_en_priv *priv, struct ieee_ets *ets)
int has_ets_tc = 0;
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
- if (ets->prio_tc[i] >= MLX4_EN_NUM_UP) {
+ if (ets->prio_tc[i] >= MLX4_EN_NUM_UP_HIGH) {
en_err(priv, "Bad priority in UP <=> TC mapping. TC: %d, UP: %d\n",
i, ets->prio_tc[i]);
return -EINVAL;
@@ -472,7 +472,7 @@ static u8 mlx4_en_dcbnl_setdcbx(struct net_device *dev, u8 mode)
goto err;
if (mlx4_en_dcbnl_ieee_setpfc(dev, &pfc))
goto err;
- if (mlx4_en_setup_tc(dev, 0))
+ if (mlx4_en_alloc_tx_queue_per_tc(dev, 0))
goto err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index ffbcb27c05e5..c751a1d434ad 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -89,7 +89,7 @@ mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
struct mlx4_en_dev *mdev = priv->mdev;
strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, DRV_VERSION " (" DRV_RELDATE ")",
+ strlcpy(drvinfo->version, DRV_VERSION,
sizeof(drvinfo->version));
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
"%d.%d.%d",
@@ -1750,7 +1750,8 @@ static void mlx4_en_get_channels(struct net_device *dev,
channel->max_tx = MLX4_EN_MAX_TX_RING_P_UP;
channel->rx_count = priv->rx_ring_num;
- channel->tx_count = priv->tx_ring_num[TX] / MLX4_EN_NUM_UP;
+ channel->tx_count = priv->tx_ring_num[TX] /
+ priv->prof->num_up;
}
static int mlx4_en_set_channels(struct net_device *dev,
@@ -1763,6 +1764,7 @@ static int mlx4_en_set_channels(struct net_device *dev,
int port_up = 0;
int xdp_count;
int err = 0;
+ u8 up;
if (!channel->tx_count || !channel->rx_count)
return -EINVAL;
@@ -1773,18 +1775,19 @@ static int mlx4_en_set_channels(struct net_device *dev,
mutex_lock(&mdev->state_lock);
xdp_count = priv->tx_ring_num[TX_XDP] ? channel->rx_count : 0;
- if (channel->tx_count * MLX4_EN_NUM_UP + xdp_count > MAX_TX_RINGS) {
+ if (channel->tx_count * priv->prof->num_up + xdp_count >
+ MAX_TX_RINGS) {
err = -EINVAL;
en_err(priv,
"Total number of TX and XDP rings (%d) exceeds the maximum supported (%d)\n",
- channel->tx_count * MLX4_EN_NUM_UP + xdp_count,
+ channel->tx_count * priv->prof->num_up + xdp_count,
MAX_TX_RINGS);
goto out;
}
memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
new_prof.num_tx_rings_p_up = channel->tx_count;
- new_prof.tx_ring_num[TX] = channel->tx_count * MLX4_EN_NUM_UP;
+ new_prof.tx_ring_num[TX] = channel->tx_count * priv->prof->num_up;
new_prof.tx_ring_num[TX_XDP] = xdp_count;
new_prof.rx_ring_num = channel->rx_count;
@@ -1799,11 +1802,11 @@ static int mlx4_en_set_channels(struct net_device *dev,
mlx4_en_safe_replace_resources(priv, tmp);
- netif_set_real_num_tx_queues(dev, priv->tx_ring_num[TX]);
netif_set_real_num_rx_queues(dev, priv->rx_ring_num);
- if (netdev_get_num_tc(dev))
- mlx4_en_setup_tc(dev, MLX4_EN_NUM_UP);
+ up = (priv->prof->num_up == MLX4_EN_NUM_UP_LOW) ?
+ 0 : priv->prof->num_up;
+ mlx4_en_setup_tc(dev, up);
en_warn(priv, "Using %d TX rings\n", priv->tx_ring_num[TX]);
en_warn(priv, "Using %d RX rings\n", priv->rx_ring_num);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c
index 36a7a54bbb82..2b0cbca4beb5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c
@@ -46,11 +46,11 @@
MODULE_AUTHOR("Liran Liss, Yevgeny Petrilin");
MODULE_DESCRIPTION("Mellanox ConnectX HCA Ethernet driver");
MODULE_LICENSE("Dual BSD/GPL");
-MODULE_VERSION(DRV_VERSION " ("DRV_RELDATE")");
+MODULE_VERSION(DRV_VERSION);
static const char mlx4_en_version[] =
DRV_NAME ": Mellanox ConnectX HCA Ethernet driver v"
- DRV_VERSION " (" DRV_RELDATE ")\n";
+ DRV_VERSION "\n";
#define MLX4_EN_PARM_INT(X, def_val, desc) \
static unsigned int X = def_val;\
@@ -125,9 +125,9 @@ void mlx4_en_update_loopback_state(struct net_device *dev,
priv->flags |= MLX4_EN_FLAG_ENABLE_HW_LOOPBACK;
mutex_lock(&priv->mdev->state_lock);
- if (priv->mdev->dev->caps.flags2 &
- MLX4_DEV_CAP_FLAG2_UPDATE_QP_SRC_CHECK_LB &&
- priv->rss_map.indir_qp.qpn) {
+ if ((priv->mdev->dev->caps.flags2 &
+ MLX4_DEV_CAP_FLAG2_UPDATE_QP_SRC_CHECK_LB) &&
+ priv->rss_map.indir_qp && priv->rss_map.indir_qp->qpn) {
int i;
int err = 0;
int loopback = !!(features & NETIF_F_LOOPBACK);
@@ -169,8 +169,10 @@ static int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
params->prof[i].tx_ppp = pfctx;
params->prof[i].tx_ring_size = MLX4_EN_DEF_TX_RING_SIZE;
params->prof[i].rx_ring_size = MLX4_EN_DEF_RX_RING_SIZE;
+ params->prof[i].num_up = MLX4_EN_NUM_UP_LOW;
+ params->prof[i].num_tx_rings_p_up = params->num_tx_rings_p_up;
params->prof[i].tx_ring_num[TX] = params->num_tx_rings_p_up *
- MLX4_EN_NUM_UP;
+ params->prof[i].num_up;
params->prof[i].rss_rings = 0;
params->prof[i].inline_thold = inline_thold;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 94fab20ef146..3a291fc1780a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -60,11 +60,11 @@ int mlx4_en_setup_tc(struct net_device *dev, u8 up)
int i;
unsigned int offset = 0;
- if (up && up != MLX4_EN_NUM_UP)
+ if (up && up != MLX4_EN_NUM_UP_HIGH)
return -EINVAL;
netdev_set_num_tc(dev, up);
-
+ netif_set_real_num_tx_queues(dev, priv->tx_ring_num[TX]);
/* Partition Tx queues evenly amongst UPs */
for (i = 0; i < up; i++) {
netdev_set_tc_queue(dev, i, priv->num_tx_rings_p_up, offset);
@@ -86,15 +86,63 @@ int mlx4_en_setup_tc(struct net_device *dev, u8 up)
return 0;
}
-static int __mlx4_en_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
+int mlx4_en_alloc_tx_queue_per_tc(struct net_device *dev, u8 tc)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_en_port_profile new_prof;
+ struct mlx4_en_priv *tmp;
+ int port_up = 0;
+ int err = 0;
+
+ tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
+ mutex_lock(&mdev->state_lock);
+ memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
+ new_prof.num_up = (tc == 0) ? MLX4_EN_NUM_UP_LOW :
+ MLX4_EN_NUM_UP_HIGH;
+ new_prof.tx_ring_num[TX] = new_prof.num_tx_rings_p_up *
+ new_prof.num_up;
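+	/* Illustrative sizing, assuming num_tx_rings_p_up == 8: tc == 0
+	 * keeps MLX4_EN_NUM_UP_LOW (1) -> 8 TX rings, while a non-zero tc
+	 * uses MLX4_EN_NUM_UP_HIGH (8) -> 64 TX rings. The real default
+	 * for num_tx_rings_p_up is system-dependent.
+	 */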
+ err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, true);
+ if (err)
+ goto out;
+
+ if (priv->port_up) {
+ port_up = 1;
+ mlx4_en_stop_port(dev, 1);
+ }
+
+ mlx4_en_safe_replace_resources(priv, tmp);
+ if (port_up) {
+ err = mlx4_en_start_port(dev);
+ if (err) {
+ en_err(priv, "Failed starting port for setup TC\n");
+ goto out;
+ }
+ }
+
+ err = mlx4_en_setup_tc(dev, tc);
+out:
+ mutex_unlock(&mdev->state_lock);
+ kfree(tmp);
+ return err;
+}
+
+static int __mlx4_en_setup_tc(struct net_device *dev, u32 handle,
+ u32 chain_index, __be16 proto,
struct tc_to_netdev *tc)
{
if (tc->type != TC_SETUP_MQPRIO)
return -EINVAL;
+ if (tc->mqprio->num_tc && tc->mqprio->num_tc != MLX4_EN_NUM_UP_HIGH)
+ return -EINVAL;
+
tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
- return mlx4_en_setup_tc(dev, tc->mqprio->num_tc);
+ return mlx4_en_alloc_tx_queue_per_tc(dev, tc->mqprio->num_tc);
}
#ifdef CONFIG_RFS_ACCEL
@@ -595,6 +643,8 @@ static int mlx4_en_get_qp(struct mlx4_en_priv *priv)
return err;
}
+ en_info(priv, "Steering Mode %d\n", dev->caps.steering_mode);
+
if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
int base_qpn = mlx4_get_base_qpn(dev, priv->port);
*qpn = base_qpn + index;
@@ -1009,7 +1059,7 @@ static void mlx4_en_do_multicast(struct mlx4_en_priv *priv,
memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
mc_list[5] = priv->port;
err = mlx4_multicast_detach(mdev->dev,
- &priv->rss_map.indir_qp,
+ priv->rss_map.indir_qp,
mc_list,
MLX4_PROT_ETH,
mclist->reg_id);
@@ -1031,7 +1081,7 @@ static void mlx4_en_do_multicast(struct mlx4_en_priv *priv,
/* needed for B0 steering support */
mc_list[5] = priv->port;
err = mlx4_multicast_attach(mdev->dev,
- &priv->rss_map.indir_qp,
+ priv->rss_map.indir_qp,
mc_list,
priv->port, 0,
MLX4_PROT_ETH,
@@ -1349,7 +1399,7 @@ static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
priv->rx_usecs = MLX4_EN_RX_COAL_TIME;
priv->tx_frames = MLX4_EN_TX_COAL_PKTS;
priv->tx_usecs = MLX4_EN_TX_COAL_TIME;
- en_dbg(INTR, priv, "Default coalesing params for mtu:%d - rx_frames:%d rx_usecs:%d\n",
+ en_dbg(INTR, priv, "Default coalescing params for mtu:%d - rx_frames:%d rx_usecs:%d\n",
priv->dev->mtu, priv->rx_frames, priv->rx_usecs);
/* Setup cq moderation params */
@@ -1676,13 +1726,15 @@ int mlx4_en_start_port(struct net_device *dev)
if (t != TX_XDP) {
tx_ring->tx_queue = netdev_get_tx_queue(dev, i);
tx_ring->recycle_ring = NULL;
+
+ /* Arm CQ for TX completions */
+ mlx4_en_arm_cq(priv, cq);
+
} else {
mlx4_en_init_recycle_ring(priv, i);
+ /* XDP TX CQ should never be armed */
}
- /* Arm CQ for TX completions */
- mlx4_en_arm_cq(priv, cq);
-
/* Set initial ownership of all Tx TXBBs to SW (1) */
for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE)
*((u32 *)(tx_ring->buf + j)) = 0xffffffff;
@@ -1741,7 +1793,7 @@ int mlx4_en_start_port(struct net_device *dev)
/* Attach rx QP to broadcast address */
eth_broadcast_addr(&mc_list[10]);
mc_list[5] = priv->port; /* needed for B0 steering support */
- if (mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
+ if (mlx4_multicast_attach(mdev->dev, priv->rss_map.indir_qp, mc_list,
priv->port, 0, MLX4_PROT_ETH,
&priv->broadcast_id))
mlx4_warn(mdev, "Failed Attaching Broadcast\n");
@@ -1865,12 +1917,12 @@ void mlx4_en_stop_port(struct net_device *dev, int detach)
/* Detach All multicasts */
eth_broadcast_addr(&mc_list[10]);
mc_list[5] = priv->port; /* needed for B0 steering support */
- mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
+ mlx4_multicast_detach(mdev->dev, priv->rss_map.indir_qp, mc_list,
MLX4_PROT_ETH, priv->broadcast_id);
list_for_each_entry(mclist, &priv->curr_list, list) {
memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
mc_list[5] = priv->port;
- mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp,
+ mlx4_multicast_detach(mdev->dev, priv->rss_map.indir_qp,
mc_list, MLX4_PROT_ETH, mclist->reg_id);
if (mclist->tunnel_reg_id)
mlx4_flow_detach(mdev->dev, mclist->tunnel_reg_id);
@@ -2139,7 +2191,7 @@ static int mlx4_en_copy_priv(struct mlx4_en_priv *dst,
memcpy(&dst->hwtstamp_config, &prof->hwtstamp_config,
sizeof(dst->hwtstamp_config));
- dst->num_tx_rings_p_up = src->mdev->profile.num_tx_rings_p_up;
+ dst->num_tx_rings_p_up = prof->num_tx_rings_p_up;
dst->rx_ring_num = prof->rx_ring_num;
dst->flags = prof->flags;
dst->mdev = src->mdev;
@@ -2192,6 +2244,7 @@ static void mlx4_en_update_priv(struct mlx4_en_priv *dst,
dst->tx_ring[t] = src->tx_ring[t];
dst->tx_cq[t] = src->tx_cq[t];
}
+ dst->num_tx_rings_p_up = src->num_tx_rings_p_up;
dst->rx_ring_num = src->rx_ring_num;
memcpy(dst->prof, src->prof, sizeof(struct mlx4_en_port_profile));
}
@@ -2375,6 +2428,7 @@ static int mlx4_en_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ case HWTSTAMP_FILTER_NTP_ALL:
config.rx_filter = HWTSTAMP_FILTER_ALL;
break;
default:
@@ -2774,7 +2828,7 @@ static int mlx4_xdp_set(struct net_device *dev, struct bpf_prog *prog)
if (priv->tx_ring_num[TX] + xdp_ring_num > MAX_TX_RINGS) {
tx_changed = 1;
new_prof.tx_ring_num[TX] =
- MAX_TX_RINGS - ALIGN(xdp_ring_num, MLX4_EN_NUM_UP);
+ MAX_TX_RINGS - ALIGN(xdp_ring_num, priv->prof->num_up);
en_warn(priv, "Reducing the number of TX rings, to not exceed the max total rings number.\n");
}
@@ -2819,11 +2873,25 @@ out:
return err;
}
-static bool mlx4_xdp_attached(struct net_device *dev)
+static u32 mlx4_xdp_query(struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
+ const struct bpf_prog *xdp_prog;
+ u32 prog_id = 0;
+
+ if (!priv->tx_ring_num[TX_XDP])
+ return prog_id;
+
+ mutex_lock(&mdev->state_lock);
+ xdp_prog = rcu_dereference_protected(
+ priv->rx_ring[0]->xdp_prog,
+ lockdep_is_held(&mdev->state_lock));
+ if (xdp_prog)
+ prog_id = xdp_prog->aux->id;
+ mutex_unlock(&mdev->state_lock);
- return !!priv->tx_ring_num[TX_XDP];
+ return prog_id;
}
static int mlx4_xdp(struct net_device *dev, struct netdev_xdp *xdp)
@@ -2832,7 +2900,8 @@ static int mlx4_xdp(struct net_device *dev, struct netdev_xdp *xdp)
case XDP_SETUP_PROG:
return mlx4_xdp_set(dev, xdp->prog);
case XDP_QUERY_PROG:
- xdp->prog_attached = mlx4_xdp_attached(dev);
+ xdp->prog_id = mlx4_xdp_query(dev);
+ xdp->prog_attached = !!xdp->prog_id;
return 0;
default:
return -EINVAL;
@@ -3250,7 +3319,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
priv->flags |= MLX4_EN_DCB_ENABLED;
priv->cee_config.pfc_state = false;
- for (i = 0; i < MLX4_EN_NUM_UP; i++)
+ for (i = 0; i < MLX4_EN_NUM_UP_HIGH; i++)
priv->cee_config.dcb_pfc[i] = pfc_disabled;
if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG) {
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_resources.c b/drivers/net/ethernet/mellanox/mlx4/en_resources.c
index a6b0db0e0383..86d2d42d658d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_resources.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_resources.c
@@ -63,7 +63,8 @@ void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
context->local_qpn = cpu_to_be32(qpn);
context->pri_path.ackto = 1 & 0x07;
context->pri_path.sched_queue = 0x83 | (priv->port - 1) << 6;
- if (user_prio >= 0) {
+ /* force user priority per tx ring */
+ if (user_prio >= 0 && priv->prof->num_up == MLX4_EN_NUM_UP_HIGH) {
context->pri_path.sched_queue |= user_prio << 3;
context->pri_path.feup = MLX4_FEUP_FORCE_ETH_UP;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 77abd1813047..e5fb89505a13 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -134,10 +134,11 @@ static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring *ring, int index,
gfp_t gfp)
{
- struct mlx4_en_rx_desc *rx_desc = ring->buf + (index * ring->stride);
+ struct mlx4_en_rx_desc *rx_desc = ring->buf +
+ (index << ring->log_stride);
struct mlx4_en_rx_alloc *frags = ring->rx_info +
(index << priv->log_rx_info);
- if (ring->page_cache.index > 0) {
+ if (likely(ring->page_cache.index > 0)) {
/* XDP uses a single page per frame */
if (!frags->page) {
ring->page_cache.index--;
@@ -178,6 +179,7 @@ static void mlx4_en_free_rx_desc(const struct mlx4_en_priv *priv,
}
}
+/* Not called from the fast path */
static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
{
struct mlx4_en_rx_ring *ring;
@@ -539,14 +541,14 @@ static void validate_loopback(struct mlx4_en_priv *priv, void *va)
priv->loopback_ok = 1;
}
-static bool mlx4_en_refill_rx_buffers(struct mlx4_en_priv *priv,
+static void mlx4_en_refill_rx_buffers(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring *ring)
{
u32 missing = ring->actual_size - (ring->prod - ring->cons);
/* Try to batch allocations, but not too much. */
if (missing < 8)
- return false;
+ return;
do {
if (mlx4_en_prepare_rx_desc(priv, ring,
ring->prod & ring->size_mask,
@@ -554,9 +556,9 @@ static bool mlx4_en_refill_rx_buffers(struct mlx4_en_priv *priv,
__GFP_MEMALLOC))
break;
ring->prod++;
- } while (--missing);
+ } while (likely(--missing));
- return true;
+ mlx4_en_update_rx_prod_db(ring);
}
/* When hardware doesn't strip the vlan, we need to calculate the checksum
@@ -637,21 +639,14 @@ static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,
int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
- struct mlx4_en_dev *mdev = priv->mdev;
- struct mlx4_cqe *cqe;
- struct mlx4_en_rx_ring *ring = priv->rx_ring[cq->ring];
- struct mlx4_en_rx_alloc *frags;
+ int factor = priv->cqe_factor;
+ struct mlx4_en_rx_ring *ring;
struct bpf_prog *xdp_prog;
- int doorbell_pending;
- struct sk_buff *skb;
- int index;
- int nr;
- unsigned int length;
+ int cq_ring = cq->ring;
+ bool doorbell_pending;
+ struct mlx4_cqe *cqe;
int polled = 0;
- int ip_summed;
- int factor = priv->cqe_factor;
- u64 timestamp;
- bool l2_tunnel;
+ int index;
if (unlikely(!priv->port_up))
return 0;
@@ -659,6 +654,8 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
if (unlikely(budget <= 0))
return polled;
+ ring = priv->rx_ring[cq_ring];
+
/* Protect accesses to: ring->xdp_prog, priv->mac_hash list */
rcu_read_lock();
xdp_prog = rcu_dereference(ring->xdp_prog);
@@ -673,10 +670,17 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
/* Process all completed CQEs */
while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
cq->mcq.cons_index & cq->size)) {
+ struct mlx4_en_rx_alloc *frags;
+ enum pkt_hash_types hash_type;
+ struct sk_buff *skb;
+ unsigned int length;
+ int ip_summed;
void *va;
+ int nr;
frags = ring->rx_info + (index << priv->log_rx_info);
va = page_address(frags[0].page) + frags[0].page_offset;
+ prefetchw(va);
/*
* make sure we read the CQE after we read the ownership bit
*/
@@ -768,7 +772,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
break;
case XDP_TX:
if (likely(!mlx4_en_xmit_frame(ring, frags, dev,
- length, cq->ring,
+ length, cq_ring,
&doorbell_pending))) {
frags[0].page = NULL;
goto next;
@@ -790,24 +794,27 @@ xdp_drop_no_cnt:
ring->packets++;
skb = napi_get_frags(&cq->napi);
- if (!skb)
+ if (unlikely(!skb))
goto next;
if (unlikely(ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL)) {
- timestamp = mlx4_en_get_cqe_ts(cqe);
- mlx4_en_fill_hwtstamps(mdev, skb_hwtstamps(skb),
+ u64 timestamp = mlx4_en_get_cqe_ts(cqe);
+
+ mlx4_en_fill_hwtstamps(priv->mdev, skb_hwtstamps(skb),
timestamp);
}
- skb_record_rx_queue(skb, cq->ring);
+ skb_record_rx_queue(skb, cq_ring);
if (likely(dev->features & NETIF_F_RXCSUM)) {
if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_TCP |
MLX4_CQE_STATUS_UDP)) {
if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
cqe->checksum == cpu_to_be16(0xffff)) {
- ip_summed = CHECKSUM_UNNECESSARY;
- l2_tunnel = (dev->hw_enc_features & NETIF_F_RXCSUM) &&
+ bool l2_tunnel = (dev->hw_enc_features & NETIF_F_RXCSUM) &&
(cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_L2_TUNNEL));
+
+ ip_summed = CHECKSUM_UNNECESSARY;
+ hash_type = PKT_HASH_TYPE_L4;
if (l2_tunnel)
skb->csum_level = 1;
ring->csum_ok++;
@@ -822,6 +829,7 @@ xdp_drop_no_cnt:
goto csum_none;
} else {
ip_summed = CHECKSUM_COMPLETE;
+ hash_type = PKT_HASH_TYPE_L3;
ring->csum_complete++;
}
} else {
@@ -831,16 +839,14 @@ xdp_drop_no_cnt:
} else {
csum_none:
ip_summed = CHECKSUM_NONE;
+ hash_type = PKT_HASH_TYPE_L3;
ring->csum_none++;
}
skb->ip_summed = ip_summed;
if (dev->features & NETIF_F_RXHASH)
skb_set_hash(skb,
be32_to_cpu(cqe->immed_rss_invalid),
- (ip_summed == CHECKSUM_UNNECESSARY) ?
- PKT_HASH_TYPE_L4 :
- PKT_HASH_TYPE_L3);
-
+ hash_type);
if ((cqe->vlan_my_qpn &
cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK)) &&
@@ -867,15 +873,17 @@ next:
++cq->mcq.cons_index;
index = (cq->mcq.cons_index) & ring->size_mask;
cqe = mlx4_en_get_cqe(cq->buf, index, priv->cqe_size) + factor;
- if (++polled == budget)
+ if (unlikely(++polled == budget))
break;
}
rcu_read_unlock();
- if (polled) {
- if (doorbell_pending)
- mlx4_en_xmit_doorbell(priv->tx_ring[TX_XDP][cq->ring]);
+ if (likely(polled)) {
+ if (doorbell_pending) {
+ priv->tx_cq[TX_XDP][cq_ring]->xdp_busy = true;
+ mlx4_en_xmit_doorbell(priv->tx_ring[TX_XDP][cq_ring]);
+ }
mlx4_cq_set_ci(&cq->mcq);
wmb(); /* ensure HW sees CQ consumer before we post new buffers */
@@ -883,8 +891,7 @@ next:
}
AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled);
- if (mlx4_en_refill_rx_buffers(priv, ring))
- mlx4_en_update_rx_prod_db(ring);
+ mlx4_en_refill_rx_buffers(priv, ring);
return polled;
}
@@ -907,16 +914,30 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
struct net_device *dev = cq->dev;
struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_cq *xdp_tx_cq = NULL;
+ bool clean_complete = true;
int done;
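+	/* When XDP TX is active, its completion CQ shares this RX NAPI
+	 * context: drain it first and remember whether the cleanup
+	 * finished, so an unfinished round forces another poll below.
+	 */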
+ if (priv->tx_ring_num[TX_XDP]) {
+ xdp_tx_cq = priv->tx_cq[TX_XDP][cq->ring];
+ if (xdp_tx_cq->xdp_busy) {
+ clean_complete = mlx4_en_process_tx_cq(dev, xdp_tx_cq,
+ budget);
+ xdp_tx_cq->xdp_busy = !clean_complete;
+ }
+ }
+
done = mlx4_en_process_rx_cq(dev, cq, budget);
/* If we used up all the quota - we're probably not done yet... */
- if (done == budget) {
+ if (done == budget || !clean_complete) {
const struct cpumask *aff;
struct irq_data *idata;
int cpu_curr;
+ /* in case we got here because of !clean_complete */
+ done = budget;
+
INC_PERF_COUNTER(priv->pstats.napi_quota);
cpu_curr = smp_processor_id();
@@ -936,7 +957,7 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
done--;
}
/* Done for now */
- if (napi_complete_done(napi, done))
+ if (likely(napi_complete_done(napi, done)))
mlx4_en_arm_cq(priv, cq);
return done;
}
@@ -1099,11 +1120,14 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
int i, qpn;
int err = 0;
int good_qps = 0;
+ u8 flags;
en_dbg(DRV, priv, "Configuring rss steering\n");
+
+ flags = priv->rx_ring_num == 1 ? MLX4_RESERVE_A0_QP : 0;
err = mlx4_qp_reserve_range(mdev->dev, priv->rx_ring_num,
priv->rx_ring_num,
- &rss_map->base_qpn, 0);
+ &rss_map->base_qpn, flags);
if (err) {
en_err(priv, "Failed reserving %d qps\n", priv->rx_ring_num);
return err;
@@ -1120,13 +1144,28 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
++good_qps;
}
+ if (priv->rx_ring_num == 1) {
+ rss_map->indir_qp = &rss_map->qps[0];
+ priv->base_qpn = rss_map->indir_qp->qpn;
+ en_info(priv, "Optimized Non-RSS steering\n");
+ return 0;
+ }
+
+ rss_map->indir_qp = kzalloc(sizeof(*rss_map->indir_qp), GFP_KERNEL);
+ if (!rss_map->indir_qp) {
+ err = -ENOMEM;
+ goto rss_err;
+ }
+
/* Configure RSS indirection qp */
- err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, &rss_map->indir_qp, GFP_KERNEL);
+ err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, rss_map->indir_qp,
+ GFP_KERNEL);
if (err) {
en_err(priv, "Failed to allocate RSS indirection QP\n");
goto rss_err;
}
- rss_map->indir_qp.event = mlx4_en_sqp_event;
+
+ rss_map->indir_qp->event = mlx4_en_sqp_event;
mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn,
priv->rx_ring[0]->cqn, -1, &context);
@@ -1164,8 +1203,9 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
err = -EINVAL;
goto indir_err;
}
+
err = mlx4_qp_to_ready(mdev->dev, &priv->res.mtt, &context,
- &rss_map->indir_qp, &rss_map->indir_state);
+ rss_map->indir_qp, &rss_map->indir_state);
if (err)
goto indir_err;
@@ -1173,9 +1213,11 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
indir_err:
mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state,
- MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp);
- mlx4_qp_remove(mdev->dev, &rss_map->indir_qp);
- mlx4_qp_free(mdev->dev, &rss_map->indir_qp);
+ MLX4_QP_STATE_RST, NULL, 0, 0, rss_map->indir_qp);
+ mlx4_qp_remove(mdev->dev, rss_map->indir_qp);
+ mlx4_qp_free(mdev->dev, rss_map->indir_qp);
+ kfree(rss_map->indir_qp);
+ rss_map->indir_qp = NULL;
rss_err:
for (i = 0; i < good_qps; i++) {
mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
@@ -1193,10 +1235,15 @@ void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv)
struct mlx4_en_rss_map *rss_map = &priv->rss_map;
int i;
- mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state,
- MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp);
- mlx4_qp_remove(mdev->dev, &rss_map->indir_qp);
- mlx4_qp_free(mdev->dev, &rss_map->indir_qp);
+ if (priv->rx_ring_num > 1) {
+ mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state,
+ MLX4_QP_STATE_RST, NULL, 0, 0,
+ rss_map->indir_qp);
+ mlx4_qp_remove(mdev->dev, rss_map->indir_qp);
+ mlx4_qp_free(mdev->dev, rss_map->indir_qp);
+ kfree(rss_map->indir_qp);
+ rss_map->indir_qp = NULL;
+ }
for (i = 0; i < priv->rx_ring_num; i++) {
mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
index 17112faafbcc..88699b181946 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
@@ -63,8 +63,8 @@ static int mlx4_en_test_loopback_xmit(struct mlx4_en_priv *priv)
skb_reserve(skb, NET_IP_ALIGN);
- ethh = (struct ethhdr *)skb_put(skb, sizeof(struct ethhdr));
- packet = (unsigned char *)skb_put(skb, packet_size);
+ ethh = skb_put(skb, sizeof(struct ethhdr));
+ packet = skb_put(skb, packet_size);
memcpy(ethh->h_dest, priv->dev->dev_addr, ETH_ALEN);
eth_zero_addr(ethh->h_source);
ethh->h_proto = htons(ETH_P_ARP);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 6ffd1849a604..4f3a9b27ce4a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -234,23 +234,24 @@ static void mlx4_en_stamp_wqe(struct mlx4_en_priv *priv,
u8 owner)
{
__be32 stamp = cpu_to_be32(STAMP_VAL | (!!owner << STAMP_SHIFT));
- struct mlx4_en_tx_desc *tx_desc = ring->buf + index * TXBB_SIZE;
+ struct mlx4_en_tx_desc *tx_desc = ring->buf + (index << LOG_TXBB_SIZE);
struct mlx4_en_tx_info *tx_info = &ring->tx_info[index];
void *end = ring->buf + ring->buf_size;
__be32 *ptr = (__be32 *)tx_desc;
int i;
/* Optimize the common case when there are no wraparounds */
- if (likely((void *)tx_desc + tx_info->nr_txbb * TXBB_SIZE <= end)) {
+ if (likely((void *)tx_desc +
+ (tx_info->nr_txbb << LOG_TXBB_SIZE) <= end)) {
/* Stamp the freed descriptor */
- for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE;
+ for (i = 0; i < tx_info->nr_txbb << LOG_TXBB_SIZE;
i += STAMP_STRIDE) {
*ptr = stamp;
ptr += STAMP_DWORDS;
}
} else {
/* Stamp the freed descriptor */
- for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE;
+ for (i = 0; i < tx_info->nr_txbb << LOG_TXBB_SIZE;
i += STAMP_STRIDE) {
*ptr = stamp;
ptr += STAMP_DWORDS;
@@ -265,11 +266,11 @@ static void mlx4_en_stamp_wqe(struct mlx4_en_priv *priv,
u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring *ring,
- int index, u8 owner, u64 timestamp,
+ int index, u64 timestamp,
int napi_mode)
{
struct mlx4_en_tx_info *tx_info = &ring->tx_info[index];
- struct mlx4_en_tx_desc *tx_desc = ring->buf + index * TXBB_SIZE;
+ struct mlx4_en_tx_desc *tx_desc = ring->buf + (index << LOG_TXBB_SIZE);
struct mlx4_wqe_data_seg *data = (void *) tx_desc + tx_info->data_offset;
void *end = ring->buf + ring->buf_size;
struct sk_buff *skb = tx_info->skb;
@@ -288,19 +289,20 @@ u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
skb_tstamp_tx(skb, &hwts);
}
- /* Optimize the common case when there are no wraparounds */
- if (likely((void *) tx_desc + tx_info->nr_txbb * TXBB_SIZE <= end)) {
- if (!tx_info->inl) {
- if (tx_info->linear)
- dma_unmap_single(priv->ddev,
- tx_info->map0_dma,
- tx_info->map0_byte_count,
- PCI_DMA_TODEVICE);
- else
- dma_unmap_page(priv->ddev,
- tx_info->map0_dma,
- tx_info->map0_byte_count,
- PCI_DMA_TODEVICE);
+ if (!tx_info->inl) {
+ if (tx_info->linear)
+ dma_unmap_single(priv->ddev,
+ tx_info->map0_dma,
+ tx_info->map0_byte_count,
+ PCI_DMA_TODEVICE);
+ else
+ dma_unmap_page(priv->ddev,
+ tx_info->map0_dma,
+ tx_info->map0_byte_count,
+ PCI_DMA_TODEVICE);
+ /* Optimize the common case when there are no wraparounds */
+ if (likely((void *)tx_desc +
+ (tx_info->nr_txbb << LOG_TXBB_SIZE) <= end)) {
for (i = 1; i < nr_maps; i++) {
data++;
dma_unmap_page(priv->ddev,
@@ -308,23 +310,10 @@ u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
be32_to_cpu(data->byte_count),
PCI_DMA_TODEVICE);
}
- }
- } else {
- if (!tx_info->inl) {
- if ((void *) data >= end) {
+ } else {
+ if ((void *)data >= end)
data = ring->buf + ((void *)data - end);
- }
- if (tx_info->linear)
- dma_unmap_single(priv->ddev,
- tx_info->map0_dma,
- tx_info->map0_byte_count,
- PCI_DMA_TODEVICE);
- else
- dma_unmap_page(priv->ddev,
- tx_info->map0_dma,
- tx_info->map0_byte_count,
- PCI_DMA_TODEVICE);
for (i = 1; i < nr_maps; i++) {
data++;
/* Check for wraparound before unmapping */
@@ -344,7 +333,7 @@ u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
u32 mlx4_en_recycle_tx_desc(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring *ring,
- int index, u8 owner, u64 timestamp,
+ int index, u64 timestamp,
int napi_mode)
{
struct mlx4_en_tx_info *tx_info = &ring->tx_info[index];
@@ -381,8 +370,7 @@ int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
while (ring->cons != ring->prod) {
ring->last_nr_txbb = ring->free_tx_desc(priv, ring,
ring->cons & ring->size_mask,
- !!(ring->cons & ring->size), 0,
- 0 /* Non-NAPI caller */);
+ 0, 0 /* Non-NAPI caller */);
ring->cons += ring->last_nr_txbb;
cnt++;
}
@@ -396,15 +384,14 @@ int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
return cnt;
}
-static bool mlx4_en_process_tx_cq(struct net_device *dev,
- struct mlx4_en_cq *cq, int napi_budget)
+bool mlx4_en_process_tx_cq(struct net_device *dev,
+ struct mlx4_en_cq *cq, int napi_budget)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_cq *mcq = &cq->mcq;
struct mlx4_en_tx_ring *ring = priv->tx_ring[cq->type][cq->ring];
struct mlx4_cqe *cqe;
- u16 index;
- u16 new_index, ring_index, stamp_index;
+ u16 index, ring_index, stamp_index;
u32 txbbs_skipped = 0;
u32 txbbs_stamp = 0;
u32 cons_index = mcq->cons_index;
@@ -419,7 +406,7 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev,
u32 last_nr_txbb;
u32 ring_cons;
- if (!priv->port_up)
+ if (unlikely(!priv->port_up))
return true;
netdev_txq_bql_complete_prefetchw(ring->tx_queue);
@@ -434,6 +421,8 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev,
/* Process all completed CQEs */
while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
cons_index & size) && (done < budget)) {
+ u16 new_index;
+
/*
* make sure we read the CQE after we read the
* ownership bit
@@ -464,8 +453,7 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev,
/* free next descriptor */
last_nr_txbb = ring->free_tx_desc(
priv, ring, ring_index,
- !!((ring_cons + txbbs_skipped) &
- ring->size), timestamp, napi_budget);
+ timestamp, napi_budget);
mlx4_en_stamp_wqe(priv, ring, stamp_index,
!!((ring_cons + txbbs_stamp) &
@@ -481,7 +469,6 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev,
cqe = mlx4_en_get_cqe(buf, index, priv->cqe_size) + factor;
}
-
/*
* To prevent CQ overflow we first update CQ consumer and only then
* the ring consumer.
@@ -494,7 +481,7 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev,
ACCESS_ONCE(ring->last_nr_txbb) = last_nr_txbb;
ACCESS_ONCE(ring->cons) = ring_cons + txbbs_skipped;
- if (ring->free_tx_desc == mlx4_en_recycle_tx_desc)
+ if (cq->type == TX_XDP)
return done < budget;
netdev_tx_completed_queue(ring->tx_queue, packets, bytes);
@@ -506,6 +493,7 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev,
netif_tx_wake_queue(ring->tx_queue);
ring->wake_queue++;
}
+
return done < budget;
}
@@ -526,7 +514,7 @@ int mlx4_en_poll_tx_cq(struct napi_struct *napi, int budget)
struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
struct net_device *dev = cq->dev;
struct mlx4_en_priv *priv = netdev_priv(dev);
- int clean_complete;
+ bool clean_complete;
clean_complete = mlx4_en_process_tx_cq(dev, cq, budget);
if (!clean_complete)
@@ -543,7 +531,7 @@ static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv,
u32 index,
unsigned int desc_size)
{
- u32 copy = (ring->size - index) * TXBB_SIZE;
+ u32 copy = (ring->size - index) << LOG_TXBB_SIZE;
int i;
for (i = desc_size - copy - 4; i >= 0; i -= 4) {
@@ -558,12 +546,12 @@ static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv,
if ((i & (TXBB_SIZE - 1)) == 0)
wmb();
- *((u32 *) (ring->buf + index * TXBB_SIZE + i)) =
+ *((u32 *)(ring->buf + (index << LOG_TXBB_SIZE) + i)) =
*((u32 *) (ring->bounce_buf + i));
}
/* Return real descriptor location */
- return ring->buf + index * TXBB_SIZE;
+ return ring->buf + (index << LOG_TXBB_SIZE);
}
/* Decide if skb can be inlined in tx descriptor to avoid dma mapping
@@ -703,15 +691,11 @@ u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
{
struct mlx4_en_priv *priv = netdev_priv(dev);
u16 rings_p_up = priv->num_tx_rings_p_up;
- u8 up = 0;
if (netdev_get_num_tc(dev))
return skb_tx_hash(dev, skb);
- if (skb_vlan_tag_present(skb))
- up = skb_vlan_tag_get(skb) >> VLAN_PRIO_SHIFT;
-
- return fallback(dev, skb) % rings_p_up + up * rings_p_up;
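+	/* The default profile now runs a single UP, so there are no per-UP
+	 * ring groups and the VLAN-priority queue offset is dropped.
+	 */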
+ return fallback(dev, skb) % rings_p_up;
}
static void mlx4_bf_copy(void __iomem *dst, const void *src,
@@ -775,37 +759,101 @@ static void mlx4_en_tx_write_desc(struct mlx4_en_tx_ring *ring,
}
}
+static bool mlx4_en_build_dma_wqe(struct mlx4_en_priv *priv,
+ struct skb_shared_info *shinfo,
+ struct mlx4_wqe_data_seg *data,
+ struct sk_buff *skb,
+ int lso_header_size,
+ __be32 mr_key,
+ struct mlx4_en_tx_info *tx_info)
+{
+ struct device *ddev = priv->ddev;
+ dma_addr_t dma = 0;
+ u32 byte_count = 0;
+ int i_frag;
+
+ /* Map fragments if any */
+ for (i_frag = shinfo->nr_frags - 1; i_frag >= 0; i_frag--) {
+ const struct skb_frag_struct *frag;
+
+ frag = &shinfo->frags[i_frag];
+ byte_count = skb_frag_size(frag);
+ dma = skb_frag_dma_map(ddev, frag,
+ 0, byte_count,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(ddev, dma))
+ goto tx_drop_unmap;
+
+ data->addr = cpu_to_be64(dma);
+ data->lkey = mr_key;
+ dma_wmb();
+ data->byte_count = cpu_to_be32(byte_count);
+ --data;
+ }
+
+ /* Map linear part if needed */
+ if (tx_info->linear) {
+ byte_count = skb_headlen(skb) - lso_header_size;
+
+ dma = dma_map_single(ddev, skb->data +
+ lso_header_size, byte_count,
+ PCI_DMA_TODEVICE);
+ if (dma_mapping_error(ddev, dma))
+ goto tx_drop_unmap;
+
+ data->addr = cpu_to_be64(dma);
+ data->lkey = mr_key;
+ dma_wmb();
+ data->byte_count = cpu_to_be32(byte_count);
+ }
+ /* tx completion can avoid cache line miss for common cases */
+ tx_info->map0_dma = dma;
+ tx_info->map0_byte_count = byte_count;
+
+ return true;
+
+tx_drop_unmap:
+ en_err(priv, "DMA mapping error\n");
+
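+	/* Fragments were mapped walking i_frag downwards, so walk back up
+	 * and unmap everything mapped before the failure.
+	 */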
+ while (++i_frag < shinfo->nr_frags) {
+ ++data;
+ dma_unmap_page(ddev, (dma_addr_t)be64_to_cpu(data->addr),
+ be32_to_cpu(data->byte_count),
+ PCI_DMA_TODEVICE);
+ }
+
+ return false;
+}
+
netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct skb_shared_info *shinfo = skb_shinfo(skb);
struct mlx4_en_priv *priv = netdev_priv(dev);
union mlx4_wqe_qpn_vlan qpn_vlan = {};
- struct device *ddev = priv->ddev;
struct mlx4_en_tx_ring *ring;
struct mlx4_en_tx_desc *tx_desc;
struct mlx4_wqe_data_seg *data;
struct mlx4_en_tx_info *tx_info;
- int tx_ind = 0;
+ int tx_ind;
int nr_txbb;
int desc_size;
int real_size;
u32 index, bf_index;
__be32 op_own;
- u16 vlan_proto = 0;
- int i_frag;
int lso_header_size;
void *fragptr = NULL;
bool bounce = false;
bool send_doorbell;
bool stop_queue;
bool inline_ok;
+ u8 data_offset;
u32 ring_cons;
bool bf_ok;
tx_ind = skb_get_queue_mapping(skb);
ring = priv->tx_ring[TX][tx_ind];
- if (!priv->port_up)
+ if (unlikely(!priv->port_up))
goto tx_drop;
/* fetch ring->cons far ahead before needing it to avoid stall */
@@ -818,7 +866,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
/* Align descriptor to TXBB size */
desc_size = ALIGN(real_size, TXBB_SIZE);
- nr_txbb = desc_size / TXBB_SIZE;
+ nr_txbb = desc_size >> LOG_TXBB_SIZE;
if (unlikely(nr_txbb > MAX_DESC_TXBBS)) {
if (netif_msg_tx_err(priv))
en_warn(priv, "Oversized header or SG list\n");
@@ -827,6 +875,8 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
bf_ok = ring->bf_enabled;
if (skb_vlan_tag_present(skb)) {
+ u16 vlan_proto;
+
qpn_vlan.vlan_tag = cpu_to_be16(skb_vlan_tag_get(skb));
vlan_proto = be16_to_cpu(skb->vlan_proto);
if (vlan_proto == ETH_P_8021AD)
@@ -851,7 +901,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
/* See if we have enough space for whole descriptor TXBB for setting
* SW ownership on next descriptor; if not, use a bounce buffer. */
if (likely(index + nr_txbb <= ring->size))
- tx_desc = ring->buf + index * TXBB_SIZE;
+ tx_desc = ring->buf + (index << LOG_TXBB_SIZE);
else {
tx_desc = (struct mlx4_en_tx_desc *) ring->bounce_buf;
bounce = true;
@@ -863,64 +913,31 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
tx_info->skb = skb;
tx_info->nr_txbb = nr_txbb;
- data = &tx_desc->data;
- if (lso_header_size)
- data = ((void *)&tx_desc->lso + ALIGN(lso_header_size + 4,
- DS_SIZE));
+ if (!lso_header_size) {
+ data = &tx_desc->data;
+ data_offset = offsetof(struct mlx4_en_tx_desc, data);
+ } else {
+ int lso_align = ALIGN(lso_header_size + 4, DS_SIZE);
+
+ data = (void *)&tx_desc->lso + lso_align;
+ data_offset = offsetof(struct mlx4_en_tx_desc, lso) + lso_align;
+ }
/* valid only for none inline segments */
- tx_info->data_offset = (void *)data - (void *)tx_desc;
+ tx_info->data_offset = data_offset;
tx_info->inl = inline_ok;
- tx_info->linear = (lso_header_size < skb_headlen(skb) &&
- !inline_ok) ? 1 : 0;
+ tx_info->linear = lso_header_size < skb_headlen(skb) && !inline_ok;
tx_info->nr_maps = shinfo->nr_frags + tx_info->linear;
data += tx_info->nr_maps - 1;
- if (!tx_info->inl) {
- dma_addr_t dma = 0;
- u32 byte_count = 0;
-
- /* Map fragments if any */
- for (i_frag = shinfo->nr_frags - 1; i_frag >= 0; i_frag--) {
- const struct skb_frag_struct *frag;
-
- frag = &shinfo->frags[i_frag];
- byte_count = skb_frag_size(frag);
- dma = skb_frag_dma_map(ddev, frag,
- 0, byte_count,
- DMA_TO_DEVICE);
- if (dma_mapping_error(ddev, dma))
- goto tx_drop_unmap;
-
- data->addr = cpu_to_be64(dma);
- data->lkey = ring->mr_key;
- dma_wmb();
- data->byte_count = cpu_to_be32(byte_count);
- --data;
- }
-
- /* Map linear part if needed */
- if (tx_info->linear) {
- byte_count = skb_headlen(skb) - lso_header_size;
-
- dma = dma_map_single(ddev, skb->data +
- lso_header_size, byte_count,
- PCI_DMA_TODEVICE);
- if (dma_mapping_error(ddev, dma))
- goto tx_drop_unmap;
-
- data->addr = cpu_to_be64(dma);
- data->lkey = ring->mr_key;
- dma_wmb();
- data->byte_count = cpu_to_be32(byte_count);
- }
- /* tx completion can avoid cache line miss for common cases */
- tx_info->map0_dma = dma;
- tx_info->map0_byte_count = byte_count;
- }
+ if (!tx_info->inl)
+ if (!mlx4_en_build_dma_wqe(priv, shinfo, data, skb,
+ lso_header_size, ring->mr_key,
+ tx_info))
+ goto tx_drop_count;
/*
* For timestamping add flag to skb_shinfo and
@@ -1056,16 +1073,6 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
}
return NETDEV_TX_OK;
-tx_drop_unmap:
- en_err(priv, "DMA mapping error\n");
-
- while (++i_frag < shinfo->nr_frags) {
- ++data;
- dma_unmap_page(ddev, (dma_addr_t) be64_to_cpu(data->addr),
- be32_to_cpu(data->byte_count),
- PCI_DMA_TODEVICE);
- }
-
tx_drop_count:
ring->tx_dropped++;
tx_drop:
@@ -1073,52 +1080,41 @@ tx_drop:
return NETDEV_TX_OK;
}
+#define MLX4_EN_XDP_TX_NRTXBB 1
+#define MLX4_EN_XDP_TX_REAL_SZ (((CTRL_SIZE + MLX4_EN_XDP_TX_NRTXBB * DS_SIZE) \
+ / 16) & 0x3f)
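+/* Assuming the mlx4 WQE segment sizes (CTRL_SIZE == DS_SIZE == 16), this
+ * evaluates to ((16 + 1 * 16) / 16) & 0x3f == 2: every XDP TX descriptor
+ * is a fixed 32 bytes, i.e. half of one 64-byte TXBB.
+ */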
+
netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_ring *rx_ring,
struct mlx4_en_rx_alloc *frame,
struct net_device *dev, unsigned int length,
- int tx_ind, int *doorbell_pending)
+ int tx_ind, bool *doorbell_pending)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
union mlx4_wqe_qpn_vlan qpn_vlan = {};
- struct mlx4_en_tx_ring *ring;
struct mlx4_en_tx_desc *tx_desc;
- struct mlx4_wqe_data_seg *data;
struct mlx4_en_tx_info *tx_info;
- int index, bf_index;
- bool send_doorbell;
- int nr_txbb = 1;
- bool stop_queue;
+ struct mlx4_wqe_data_seg *data;
+ struct mlx4_en_tx_ring *ring;
dma_addr_t dma;
- int real_size;
__be32 op_own;
- u32 ring_cons;
- bool bf_ok;
+ int index;
- BUILD_BUG_ON_MSG(ALIGN(CTRL_SIZE + DS_SIZE, TXBB_SIZE) != TXBB_SIZE,
- "mlx4_en_xmit_frame requires minimum size tx desc");
+ if (unlikely(!priv->port_up))
+ goto tx_drop;
ring = priv->tx_ring[TX_XDP][tx_ind];
- if (!priv->port_up)
- goto tx_drop;
-
- if (mlx4_en_is_tx_ring_full(ring))
+ if (unlikely(mlx4_en_is_tx_ring_full(ring)))
goto tx_drop_count;
- /* fetch ring->cons far ahead before needing it to avoid stall */
- ring_cons = READ_ONCE(ring->cons);
-
index = ring->prod & ring->size_mask;
tx_info = &ring->tx_info[index];
- bf_ok = ring->bf_enabled;
-
/* Track current inflight packets for performance analysis */
AVG_PERF_COUNTER(priv->pstats.inflight_avg,
- (u32)(ring->prod - ring_cons - 1));
+ (u32)(ring->prod - READ_ONCE(ring->cons) - 1));
- bf_index = ring->prod;
- tx_desc = ring->buf + index * TXBB_SIZE;
+ tx_desc = ring->buf + (index << LOG_TXBB_SIZE);
data = &tx_desc->data;
dma = frame->dma;
@@ -1127,9 +1123,9 @@ netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_ring *rx_ring,
frame->page = NULL;
tx_info->map0_dma = dma;
tx_info->map0_byte_count = PAGE_SIZE;
- tx_info->nr_txbb = nr_txbb;
+ tx_info->nr_txbb = MLX4_EN_XDP_TX_NRTXBB;
tx_info->nr_bytes = max_t(unsigned int, length, ETH_ZLEN);
- tx_info->data_offset = (void *)data - (void *)tx_desc;
+ tx_info->data_offset = offsetof(struct mlx4_en_tx_desc, data);
tx_info->ts_requested = 0;
tx_info->nr_maps = 1;
tx_info->linear = 1;
@@ -1153,28 +1149,19 @@ netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_ring *rx_ring,
rx_ring->xdp_tx++;
AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, length);
- ring->prod += nr_txbb;
-
- stop_queue = mlx4_en_is_tx_ring_full(ring);
- send_doorbell = stop_queue ||
- *doorbell_pending > MLX4_EN_DOORBELL_BUDGET;
- bf_ok &= send_doorbell;
+ ring->prod += MLX4_EN_XDP_TX_NRTXBB;
- real_size = ((CTRL_SIZE + nr_txbb * DS_SIZE) / 16) & 0x3f;
+ qpn_vlan.fence_size = MLX4_EN_XDP_TX_REAL_SZ;
- if (bf_ok)
- qpn_vlan.bf_qpn = ring->doorbell_qpn | cpu_to_be32(real_size);
- else
- qpn_vlan.fence_size = real_size;
-
- mlx4_en_tx_write_desc(ring, tx_desc, qpn_vlan, TXBB_SIZE, bf_index,
- op_own, bf_ok, send_doorbell);
- *doorbell_pending = send_doorbell ? 0 : *doorbell_pending + 1;
+ mlx4_en_tx_write_desc(ring, tx_desc, qpn_vlan, TXBB_SIZE, 0,
+ op_own, false, false);
+ *doorbell_pending = true;
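+	/* The doorbell itself is rung once per NAPI round by the RX CQ
+	 * handler (see mlx4_en_process_rx_cq()), so only the flag is set
+	 * here.
+	 */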
return NETDEV_TX_OK;
tx_drop_count:
rx_ring->xdp_tx_full++;
+ *doorbell_pending = true;
tx_drop:
return NETDEV_TX_BUSY;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 83aab1e4c8c8..a27c9c13a36e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -91,7 +91,7 @@ module_param_array(probe_vf, byte, &probe_vfs_argc, 0444);
MODULE_PARM_DESC(probe_vf, "number of vfs to probe by pf driver (num_vfs > 0)\n"
"probe_vf=port1,port2,port1+2");
-int mlx4_log_num_mgm_entry_size = MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE;
+static int mlx4_log_num_mgm_entry_size = MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE;
module_param_named(log_num_mgm_entry_size,
mlx4_log_num_mgm_entry_size, int, 0444);
MODULE_PARM_DESC(log_num_mgm_entry_size, "log mgm size, that defines the num"
@@ -119,7 +119,7 @@ MODULE_PARM_DESC(enable_4k_uar,
static char mlx4_version[] =
DRV_NAME ": Mellanox ConnectX core driver v"
- DRV_VERSION " (" DRV_RELDATE ")\n";
+ DRV_VERSION "\n";
static struct mlx4_profile default_profile = {
.num_qp = 1 << 18,
@@ -2356,8 +2356,8 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
MLX4_A0_STEERING_TABLE_SIZE;
}
- mlx4_dbg(dev, "DMFS high rate steer mode is: %s\n",
- dmfs_high_rate_steering_mode_str(
+ mlx4_info(dev, "DMFS high rate steer mode is: %s\n",
+ dmfs_high_rate_steering_mode_str(
dev->caps.dmfs_high_steer_mode));
}
} else {
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index b4f1bc56cc68..30616cd0140d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -56,8 +56,7 @@
#define DRV_NAME "mlx4_core"
#define PFX DRV_NAME ": "
-#define DRV_VERSION "2.2-1"
-#define DRV_RELDATE "Feb, 2014"
+#define DRV_VERSION "4.0-0"
#define MLX4_FS_UDP_UC_EN (1 << 1)
#define MLX4_FS_TCP_UC_EN (1 << 2)
@@ -231,7 +230,6 @@ do { \
#define mlx4_warn(mdev, format, ...) \
dev_warn(&(mdev)->persist->pdev->dev, format, ##__VA_ARGS__)
-extern int mlx4_log_num_mgm_entry_size;
extern int log_mtts_per_seg;
extern int mlx4_internal_err_reset;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 39f401aa3047..d350b2158104 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -58,8 +58,7 @@
#include "mlx4_stats.h"
#define DRV_NAME "mlx4_en"
-#define DRV_VERSION "2.2-1"
-#define DRV_RELDATE "Feb 2014"
+#define DRV_VERSION "4.0-0"
#define MLX4_EN_MSG_LEVEL (NETIF_MSG_LINK | NETIF_MSG_IFDOWN)
@@ -73,7 +72,8 @@
#define DEF_RX_RINGS 16
#define MAX_RX_RINGS 128
#define MIN_RX_RINGS 4
-#define TXBB_SIZE 64
+#define LOG_TXBB_SIZE 6
+#define TXBB_SIZE BIT(LOG_TXBB_SIZE)
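+/* TXBB_SIZE stays 64; deriving it from LOG_TXBB_SIZE lets hot paths use
+ * shifts (index << LOG_TXBB_SIZE) instead of multiplies and divides.
+ */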
#define HEADROOM (2048 / TXBB_SIZE + 1)
#define STAMP_STRIDE 64
#define STAMP_DWORDS (STAMP_STRIDE / 4)
@@ -115,14 +115,14 @@
#define MLX4_EN_SMALL_PKT_SIZE 64
#define MLX4_EN_MIN_TX_RING_P_UP 1
#define MLX4_EN_MAX_TX_RING_P_UP 32
-#define MLX4_EN_NUM_UP 8
-#define MLX4_EN_DEF_TX_RING_SIZE 512
+#define MLX4_EN_NUM_UP_LOW 1
+#define MLX4_EN_NUM_UP_HIGH 8
#define MLX4_EN_DEF_RX_RING_SIZE 1024
+#define MLX4_EN_DEF_TX_RING_SIZE MLX4_EN_DEF_RX_RING_SIZE
#define MAX_TX_RINGS (MLX4_EN_MAX_TX_RING_P_UP * \
- MLX4_EN_NUM_UP)
+ MLX4_EN_NUM_UP_HIGH)
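+/* i.e. MAX_TX_RINGS = 32 * 8 = 256 with the constants above */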
#define MLX4_EN_DEFAULT_TX_WORK 256
-#define MLX4_EN_DOORBELL_BUDGET 8
/* Target number of packets to coalesce with interrupt moderation */
#define MLX4_EN_RX_COAL_TARGET 44
@@ -277,7 +277,7 @@ struct mlx4_en_tx_ring {
struct netdev_queue *tx_queue;
u32 (*free_tx_desc)(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring *ring,
- int index, u8 owner,
+ int index,
u64 timestamp, int napi_mode);
struct mlx4_en_rx_ring *recycle_ring;
@@ -360,7 +360,10 @@ struct mlx4_en_cq {
struct mlx4_hwq_resources wqres;
int ring;
struct net_device *dev;
- struct napi_struct napi;
+ union {
+ struct napi_struct napi;
+ bool xdp_busy;
+ };
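+	/* Sharing storage is safe: TX_XDP CQs never register NAPI (see the
+	 * mlx4_en_activate_cq() switch earlier in this patch), so napi and
+	 * xdp_busy are never live at the same time for one CQ.
+	 */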
int size;
int buf_size;
int vector;
@@ -384,6 +387,7 @@ struct mlx4_en_port_profile {
u8 rx_ppp;
u8 tx_pause;
u8 tx_ppp;
+ u8 num_up;
int rss_rings;
int inline_thold;
struct hwtstamp_config hwtstamp_config;
@@ -432,7 +436,7 @@ struct mlx4_en_rss_map {
int base_qpn;
struct mlx4_qp qps[MAX_RX_RINGS];
enum mlx4_qp_state state[MAX_RX_RINGS];
- struct mlx4_qp indir_qp;
+ struct mlx4_qp *indir_qp;
enum mlx4_qp_state indir_state;
};
@@ -483,7 +487,7 @@ enum dcb_pfc_type {
struct mlx4_en_cee_config {
bool pfc_state;
- enum dcb_pfc_type dcb_pfc[MLX4_EN_NUM_UP];
+ enum dcb_pfc_type dcb_pfc[MLX4_EN_NUM_UP_HIGH];
};
#endif
@@ -690,7 +694,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_ring *rx_ring,
struct mlx4_en_rx_alloc *frame,
struct net_device *dev, unsigned int length,
- int tx_ind, int *doorbell_pending);
+ int tx_ind, bool *doorbell_pending);
void mlx4_en_xmit_doorbell(struct mlx4_en_tx_ring *ring);
bool mlx4_en_rx_recycle(struct mlx4_en_rx_ring *ring,
struct mlx4_en_rx_alloc *frame);
@@ -722,13 +726,15 @@ int mlx4_en_process_rx_cq(struct net_device *dev,
int budget);
int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget);
int mlx4_en_poll_tx_cq(struct napi_struct *napi, int budget);
+bool mlx4_en_process_tx_cq(struct net_device *dev,
+ struct mlx4_en_cq *cq, int napi_budget);
u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring *ring,
- int index, u8 owner, u64 timestamp,
+ int index, u64 timestamp,
int napi_mode);
u32 mlx4_en_recycle_tx_desc(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring *ring,
- int index, u8 owner, u64 timestamp,
+ int index, u64 timestamp,
int napi_mode);
void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
int is_tx, int rss, int qpn, int cqn, int user_prio,
@@ -757,6 +763,7 @@ extern const struct dcbnl_rtnl_ops mlx4_en_dcbnl_pfc_ops;
#endif
int mlx4_en_setup_tc(struct net_device *dev, u8 up);
+int mlx4_en_alloc_tx_queue_per_tc(struct net_device *dev, u8 tc);
#ifdef CONFIG_RFS_ACCEL
void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index 27251a78075c..5aee05992f27 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -11,6 +11,20 @@ config MLX5_CORE
Core driver for low level functionality of the ConnectX-4 and
Connect-IB cards by Mellanox Technologies.
+config MLX5_ACCEL
+ bool
+
+config MLX5_FPGA
+ bool "Mellanox Technologies Innova support"
+ depends on MLX5_CORE
+ select MLX5_ACCEL
+ ---help---
+ Build support for the Innova family of network cards by Mellanox
+	  Technologies. Innova network cards consist of a ConnectX chip
+ and an FPGA chip on one board. If you select this option, the
+ mlx5_core driver will include the Innova FPGA core and allow building
+ sandbox-specific client drivers.
+
config MLX5_CORE_EN
bool "Mellanox Technologies ConnectX-4 Ethernet support"
depends on NETDEVICES && ETHERNET && INET && PCI && MLX5_CORE
@@ -38,3 +52,15 @@ config MLX5_CORE_IPOIB
default n
---help---
MLX5 IPoIB offloads & acceleration support.
+
+config MLX5_EN_IPSEC
+ bool "IPSec XFRM cryptography-offload accelaration"
+ depends on MLX5_ACCEL
+ depends on MLX5_CORE_EN
+ depends on XFRM_OFFLOAD
+ depends on INET_ESP_OFFLOAD || INET6_ESP_OFFLOAD
+ default n
+ ---help---
+ Build support for IPsec cryptography-offload acceleration in the NIC.
+ Note: Support for hardware with this capability needs to be selected
+ for this option to become available.
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index 9e644615f07a..ca367445f864 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -1,9 +1,15 @@
obj-$(CONFIG_MLX5_CORE) += mlx5_core.o
+subdir-ccflags-y += -I$(src)
mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
health.o mcg.o cq.o srq.o alloc.o qp.o port.o mr.o pd.o \
mad.o transobj.o vport.o sriov.o fs_cmd.o fs_core.o \
- fs_counters.o rl.o lag.o dev.o
+ fs_counters.o rl.o lag.o dev.o lib/gid.o
+
+mlx5_core-$(CONFIG_MLX5_ACCEL) += accel/ipsec.o
+
+mlx5_core-$(CONFIG_MLX5_FPGA) += fpga/cmd.o fpga/core.o fpga/conn.o fpga/sdk.o \
+ fpga/ipsec.o
mlx5_core-$(CONFIG_MLX5_CORE_EN) += wq.o eswitch.o eswitch_offloads.o \
en_main.o en_common.o en_fs.o en_ethtool.o en_tx.o \
@@ -12,4 +18,7 @@ mlx5_core-$(CONFIG_MLX5_CORE_EN) += wq.o eswitch.o eswitch_offloads.o \
mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o
-mlx5_core-$(CONFIG_MLX5_CORE_IPOIB) += ipoib.o
+mlx5_core-$(CONFIG_MLX5_CORE_IPOIB) += ipoib/ipoib.o ipoib/ethtool.o
+
+mlx5_core-$(CONFIG_MLX5_EN_IPSEC) += en_accel/ipsec.o en_accel/ipsec_rxtx.o \
+ en_accel/ipsec_stats.o
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c
new file mode 100644
index 000000000000..53e69edaedde
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/mlx5/device.h>
+
+#include "accel/ipsec.h"
+#include "mlx5_core.h"
+#include "fpga/ipsec.h"
+
+void *mlx5_accel_ipsec_sa_cmd_exec(struct mlx5_core_dev *mdev,
+ struct mlx5_accel_ipsec_sa *cmd)
+{
+ if (!MLX5_IPSEC_DEV(mdev))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ return mlx5_fpga_ipsec_sa_cmd_exec(mdev, cmd);
+}
+
+int mlx5_accel_ipsec_sa_cmd_wait(void *ctx)
+{
+ return mlx5_fpga_ipsec_sa_cmd_wait(ctx);
+}
+
+u32 mlx5_accel_ipsec_device_caps(struct mlx5_core_dev *mdev)
+{
+ return mlx5_fpga_ipsec_device_caps(mdev);
+}
+
+unsigned int mlx5_accel_ipsec_counters_count(struct mlx5_core_dev *mdev)
+{
+ return mlx5_fpga_ipsec_counters_count(mdev);
+}
+
+int mlx5_accel_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters,
+ unsigned int count)
+{
+ return mlx5_fpga_ipsec_counters_read(mdev, counters, count);
+}
+
+int mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev)
+{
+ return mlx5_fpga_ipsec_init(mdev);
+}
+
+void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev)
+{
+ mlx5_fpga_ipsec_cleanup(mdev);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h
new file mode 100644
index 000000000000..d6e20fea9554
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h
@@ -0,0 +1,138 @@
+/*
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef __MLX5_ACCEL_IPSEC_H__
+#define __MLX5_ACCEL_IPSEC_H__
+
+#ifdef CONFIG_MLX5_ACCEL
+
+#include <linux/mlx5/driver.h>
+
+enum {
+ MLX5_ACCEL_IPSEC_DEVICE = BIT(1),
+ MLX5_ACCEL_IPSEC_IPV6 = BIT(2),
+ MLX5_ACCEL_IPSEC_ESP = BIT(3),
+ MLX5_ACCEL_IPSEC_LSO = BIT(4),
+};
+
+#define MLX5_IPSEC_SADB_IP_AH BIT(7)
+#define MLX5_IPSEC_SADB_IP_ESP BIT(6)
+#define MLX5_IPSEC_SADB_SA_VALID BIT(5)
+#define MLX5_IPSEC_SADB_SPI_EN BIT(4)
+#define MLX5_IPSEC_SADB_DIR_SX BIT(3)
+#define MLX5_IPSEC_SADB_IPV6 BIT(2)
+
+enum {
+ MLX5_IPSEC_CMD_ADD_SA = 0,
+ MLX5_IPSEC_CMD_DEL_SA = 1,
+};
+
+enum mlx5_accel_ipsec_enc_mode {
+ MLX5_IPSEC_SADB_MODE_NONE = 0,
+ MLX5_IPSEC_SADB_MODE_AES_GCM_128_AUTH_128 = 1,
+ MLX5_IPSEC_SADB_MODE_AES_GCM_256_AUTH_128 = 3,
+};
+
+#define MLX5_IPSEC_DEV(mdev) (mlx5_accel_ipsec_device_caps(mdev) & \
+ MLX5_ACCEL_IPSEC_DEVICE)
+
+struct mlx5_accel_ipsec_sa {
+ __be32 cmd;
+ u8 key_enc[32];
+ u8 key_auth[32];
+ __be32 sip[4];
+ __be32 dip[4];
+ union {
+ struct {
+ __be32 reserved;
+ u8 salt_iv[8];
+ __be32 salt;
+ } __packed gcm;
+ struct {
+ u8 salt[16];
+ } __packed cbc;
+ };
+ __be32 spi;
+ __be32 sw_sa_handle;
+ __be16 tfclen;
+ u8 enc_mode;
+ u8 sip_masklen;
+ u8 dip_masklen;
+ u8 flags;
+ u8 reserved[2];
+} __packed;
+
+/**
+ * mlx5_accel_ipsec_sa_cmd_exec - Execute an IPSec SADB command
+ * @mdev: mlx5 device
+ * @cmd: command to execute
+ * May be called from atomic context. Returns a context pointer on success,
+ * or an error pointer. The caller must eventually call
+ * mlx5_accel_ipsec_sa_cmd_wait() from non-atomic context to clean up the
+ * context pointer.
+ */
+void *mlx5_accel_ipsec_sa_cmd_exec(struct mlx5_core_dev *mdev,
+ struct mlx5_accel_ipsec_sa *cmd);
+
+/**
+ * mlx5_accel_ipsec_sa_cmd_wait - Wait for command execution completion
+ * @context: Context pointer returned from call to mlx5_accel_ipsec_sa_cmd_exec
+ * Sleeps (killable) until command execution is complete.
+ * Returns the command result, or -EINTR if killed
+ */
+int mlx5_accel_ipsec_sa_cmd_wait(void *context);
+
+u32 mlx5_accel_ipsec_device_caps(struct mlx5_core_dev *mdev);
+
+unsigned int mlx5_accel_ipsec_counters_count(struct mlx5_core_dev *mdev);
+int mlx5_accel_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters,
+ unsigned int count);
+
+int mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev);
+void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev);
+
+#else
+
+#define MLX5_IPSEC_DEV(mdev) false
+
+static inline int mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev)
+{
+ return 0;
+}
+
+static inline void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev)
+{
+}
+
+#endif
+
+#endif /* __MLX5_ACCEL_IPSEC_H__ */
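A minimal usage sketch of the exec/wait pair declared above, assuming an mdev in scope; fill_sa_fields() is a hypothetical stand-in for the real SA builder (mlx5e_ipsec_build_hw_sa() in en_accel/ipsec.c, later in this patch):

	struct mlx5_accel_ipsec_sa hw_sa = { .cmd = htonl(MLX5_IPSEC_CMD_ADD_SA) };
	void *ctx;
	int err;

	/* hypothetical helper: fill addresses, keys, SPI, flags, ... */
	fill_sa_fields(&hw_sa);

	ctx = mlx5_accel_ipsec_sa_cmd_exec(mdev, &hw_sa); /* atomic-safe */
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	/* must run in non-atomic context; also cleans up ctx */
	err = mlx5_accel_ipsec_sa_cmd_wait(ctx);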
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
index 66bd213f35ce..3c95f7f53802 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
@@ -274,7 +274,6 @@ void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db)
}
EXPORT_SYMBOL_GPL(mlx5_db_free);
-
void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas)
{
u64 addr;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 10d282841f5b..f5a2c605749f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -217,7 +217,6 @@ static void free_cmd(struct mlx5_cmd_work_ent *ent)
kfree(ent);
}
-
static int verify_signature(struct mlx5_cmd_work_ent *ent)
{
struct mlx5_cmd_mailbox *next = ent->out->next;
@@ -308,6 +307,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_SET_FLOW_TABLE_ROOT:
case MLX5_CMD_OP_DEALLOC_ENCAP_HEADER:
case MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT:
+ case MLX5_CMD_OP_FPGA_DESTROY_QP:
return MLX5_CMD_STAT_OK;
case MLX5_CMD_OP_QUERY_HCA_CAP:
@@ -420,6 +420,10 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
case MLX5_CMD_OP_ALLOC_ENCAP_HEADER:
case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
+ case MLX5_CMD_OP_FPGA_CREATE_QP:
+ case MLX5_CMD_OP_FPGA_MODIFY_QP:
+ case MLX5_CMD_OP_FPGA_QUERY_QP:
+ case MLX5_CMD_OP_FPGA_QUERY_QP_COUNTERS:
*status = MLX5_DRIVER_STATUS_ABORTED;
*synd = MLX5_DRIVER_SYND;
return -EIO;
@@ -586,6 +590,11 @@ const char *mlx5_command_str(int command)
MLX5_COMMAND_STR_CASE(DEALLOC_ENCAP_HEADER);
MLX5_COMMAND_STR_CASE(ALLOC_MODIFY_HEADER_CONTEXT);
MLX5_COMMAND_STR_CASE(DEALLOC_MODIFY_HEADER_CONTEXT);
+ MLX5_COMMAND_STR_CASE(FPGA_CREATE_QP);
+ MLX5_COMMAND_STR_CASE(FPGA_MODIFY_QP);
+ MLX5_COMMAND_STR_CASE(FPGA_QUERY_QP);
+ MLX5_COMMAND_STR_CASE(FPGA_QUERY_QP_COUNTERS);
+ MLX5_COMMAND_STR_CASE(FPGA_DESTROY_QP);
default: return "unknown command opcode";
}
}
@@ -786,6 +795,8 @@ static void cmd_work_handler(struct work_struct *work)
struct mlx5_cmd_layout *lay;
struct semaphore *sem;
unsigned long flags;
+ bool poll_cmd = ent->polling;
+
sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
down(sem);
@@ -846,7 +857,7 @@ static void cmd_work_handler(struct work_struct *work)
iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell);
mmiowb();
/* if not in polling don't use ent after this point */
- if (cmd->mode == CMD_MODE_POLLING) {
+ if (cmd->mode == CMD_MODE_POLLING || poll_cmd) {
poll_timeout(ent);
/* make sure we read the descriptor after ownership is SW */
rmb();
@@ -874,7 +885,7 @@ static const char *deliv_status_to_str(u8 status)
case MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR:
return "command input length error";
case MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR:
- return "command ouput length error";
+ return "command output length error";
case MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR:
return "reserved fields not cleared";
case MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR:
@@ -890,7 +901,7 @@ static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
struct mlx5_cmd *cmd = &dev->cmd;
int err;
- if (cmd->mode == CMD_MODE_POLLING) {
+ if (cmd->mode == CMD_MODE_POLLING || ent->polling) {
wait_for_completion(&ent->done);
} else if (!wait_for_completion_timeout(&ent->done, timeout)) {
ent->ret = -ETIMEDOUT;
@@ -918,7 +929,7 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
struct mlx5_cmd_msg *out, void *uout, int uout_size,
mlx5_cmd_cbk_t callback,
void *context, int page_queue, u8 *status,
- u8 token)
+ u8 token, bool force_polling)
{
struct mlx5_cmd *cmd = &dev->cmd;
struct mlx5_cmd_work_ent *ent;
@@ -936,6 +947,7 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
return PTR_ERR(ent);
ent->token = token;
+ ent->polling = force_polling;
if (!callback)
init_completion(&ent->done);
@@ -1001,7 +1013,6 @@ static ssize_t dbg_write(struct file *filp, const char __user *buf,
return err ? err : count;
}
-
static const struct file_operations fops = {
.owner = THIS_MODULE,
.open = simple_open,
@@ -1153,7 +1164,7 @@ err_alloc:
}
static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev,
- struct mlx5_cmd_msg *msg)
+ struct mlx5_cmd_msg *msg)
{
struct mlx5_cmd_mailbox *head = msg->next;
struct mlx5_cmd_mailbox *next;
@@ -1537,7 +1548,8 @@ static int is_manage_pages(void *in)
}
static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
- int out_size, mlx5_cmd_cbk_t callback, void *context)
+ int out_size, mlx5_cmd_cbk_t callback, void *context,
+ bool force_polling)
{
struct mlx5_cmd_msg *inb;
struct mlx5_cmd_msg *outb;
@@ -1582,7 +1594,7 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
}
err = mlx5_cmd_invoke(dev, inb, outb, out, out_size, callback, context,
- pages_queue, &status, token);
+ pages_queue, &status, token, force_polling);
if (err)
goto out_out;
@@ -1610,7 +1622,7 @@ int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
{
int err;
- err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL);
+ err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, false);
return err ? : mlx5_cmd_check(dev, in, out);
}
EXPORT_SYMBOL(mlx5_cmd_exec);
@@ -1619,10 +1631,22 @@ int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size,
void *out, int out_size, mlx5_cmd_cbk_t callback,
void *context)
{
- return cmd_exec(dev, in, in_size, out, out_size, callback, context);
+ return cmd_exec(dev, in, in_size, out, out_size, callback, context,
+ false);
}
EXPORT_SYMBOL(mlx5_cmd_exec_cb);
+int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size,
+ void *out, int out_size)
+{
+ int err;
+
+ err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, true);
+
+ return err ? : mlx5_cmd_check(dev, in, out);
+}
+EXPORT_SYMBOL(mlx5_cmd_exec_polling);
+
static void destroy_msg_cache(struct mlx5_core_dev *dev)
{
struct cmd_msg_cache *ch;
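The new mlx5_cmd_exec_polling() entry point is for callers that must issue commands while completion EQs are unavailable (e.g. during FPGA QP setup). A hedged caller sketch, assuming the fpga_create_qp_in/out command layouts introduced elsewhere in this series:

	u32 in[MLX5_ST_SZ_DW(fpga_create_qp_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(fpga_create_qp_out)];
	int err;

	MLX5_SET(fpga_create_qp_in, in, opcode, MLX5_CMD_OP_FPGA_CREATE_QP);
	/* Completion EQs may not be usable yet, so force polling mode */
	err = mlx5_cmd_exec_polling(dev, in, sizeof(in), out, sizeof(out));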
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
index e94a9532e218..7ecadb501743 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
@@ -168,7 +168,6 @@ static ssize_t average_read(struct file *filp, char __user *buf, size_t count,
return ret;
}
-
static ssize_t average_write(struct file *filp, const char __user *buf,
size_t count, loff_t *pos)
{
@@ -405,7 +404,7 @@ static u64 cq_read_field(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
u32 *out;
int err;
- out = mlx5_vzalloc(outlen);
+ out = kvzalloc(outlen, GFP_KERNEL);
if (!out)
return param;
@@ -466,7 +465,6 @@ static ssize_t dbg_read(struct file *filp, char __user *buf, size_t count,
return -EINVAL;
}
-
if (is_str)
ret = snprintf(tbuf, sizeof(tbuf), "%s\n", (const char *)(unsigned long)field);
else
@@ -562,7 +560,6 @@ void mlx5_debug_qp_remove(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp)
rem_res_tree(qp->dbg);
}
-
int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
int err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 944fc1742464..e1b7ddfecd01 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -52,8 +52,10 @@
#define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v)
-#define MLX5E_HW2SW_MTU(hwmtu) ((hwmtu) - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))
-#define MLX5E_SW2HW_MTU(swmtu) ((swmtu) + (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))
+#define MLX5E_ETH_HARD_MTU (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)
+
+#define MLX5E_HW2SW_MTU(priv, hwmtu) ((hwmtu) - ((priv)->hard_mtu))
+#define MLX5E_SW2HW_MTU(priv, swmtu) ((swmtu) + ((priv)->hard_mtu))
#define MLX5E_MAX_NUM_TC 8
@@ -70,6 +72,8 @@
#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW 0x6
#define MLX5_RX_HEADROOM NET_SKB_PAD
+#define MLX5_SKB_FRAG_SZ(len) (SKB_DATA_ALIGN(len) + \
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev) \
(6 + MLX5_CAP_GEN(mdev, cache_line_128byte)) /* HW restriction */
@@ -213,6 +217,7 @@ struct mlx5e_cq_moder {
struct mlx5e_params {
u8 log_sq_size;
u8 rq_wq_type;
+ u16 rq_headroom;
u8 mpwqe_log_stride_sz;
u8 mpwqe_log_num_strides;
u8 log_rq_size;
@@ -323,6 +328,7 @@ struct mlx5e_sq_dma {
enum {
MLX5E_SQ_STATE_ENABLED,
+ MLX5E_SQ_STATE_IPSEC,
};
struct mlx5e_sq_wqe_info {
@@ -443,6 +449,11 @@ struct mlx5e_dma_info {
dma_addr_t addr;
};
+struct mlx5e_wqe_frag_info {
+ struct mlx5e_dma_info di;
+ u32 offset;
+};
+
struct mlx5e_umr_dma_info {
__be64 *mtt;
dma_addr_t mtt_addr;
@@ -504,7 +515,12 @@ struct mlx5e_rq {
struct mlx5_wq_ll wq;
union {
- struct mlx5e_dma_info *dma_info;
+ struct {
+ struct mlx5e_wqe_frag_info *frag_info;
+ u32 frag_sz; /* max possible skb frag_sz */
+ bool page_reuse;
+ bool xdp_xmit;
+ } wqe;
struct {
struct mlx5e_mpw_info *info;
void *mtt_no_align;
@@ -625,6 +641,8 @@ struct mlx5e_tc_table {
struct rhashtable_params ht_params;
struct rhashtable ht;
+
+ DECLARE_HASHTABLE(mod_hdr_tbl, 8);
};
struct mlx5e_vlan_table {
@@ -745,6 +763,7 @@ struct mlx5e_priv {
struct mlx5e_tir indir_tir[MLX5E_NUM_INDIR_TIRS];
struct mlx5e_tir direct_tir[MLX5E_MAX_NUM_CHANNELS];
u32 tx_rates[MLX5E_MAX_NUM_SQS];
+ int hard_mtu;
struct mlx5e_flow_steering fs;
struct mlx5e_vxlan_db vxlan;
@@ -766,6 +785,9 @@ struct mlx5e_priv {
const struct mlx5e_profile *profile;
void *ppriv;
+#ifdef CONFIG_MLX5_EN_IPSEC
+ struct mlx5e_ipsec *ipsec;
+#endif
};
struct mlx5e_profile {
@@ -780,6 +802,7 @@ struct mlx5e_profile {
void (*enable)(struct mlx5e_priv *priv);
void (*disable)(struct mlx5e_priv *priv);
void (*update_stats)(struct mlx5e_priv *priv);
+ void (*update_carrier)(struct mlx5e_priv *priv);
int (*max_nch)(struct mlx5_core_dev *mdev);
struct {
mlx5e_fp_handle_rx_cqe handle_rx_cqe;
@@ -814,13 +837,12 @@ void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix);
void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix);
void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq);
void mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi);
-struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq);
void mlx5e_rx_am(struct mlx5e_rq *rq);
void mlx5e_rx_am_work(struct work_struct *work);
struct mlx5e_cq_moder mlx5e_am_get_def_profile(u8 rx_cq_period_mode);
-void mlx5e_update_stats(struct mlx5e_priv *priv);
+void mlx5e_update_stats(struct mlx5e_priv *priv, bool full);
int mlx5e_create_flow_steering(struct mlx5e_priv *priv);
void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv);
@@ -847,8 +869,8 @@ void mlx5e_timestamp_init(struct mlx5e_priv *priv);
void mlx5e_timestamp_cleanup(struct mlx5e_priv *priv);
void mlx5e_pps_event_handler(struct mlx5e_priv *priv,
struct ptp_clock_event *event);
-int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr);
-int mlx5e_hwstamp_get(struct net_device *dev, struct ifreq *ifr);
+int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr);
+int mlx5e_hwstamp_get(struct mlx5e_priv *priv, struct ifreq *ifr);
int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool val);
int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
@@ -1019,6 +1041,31 @@ int mlx5e_open(struct net_device *netdev);
void mlx5e_update_stats_work(struct work_struct *work);
u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout);
+/* ethtool helpers */
+void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv,
+ struct ethtool_drvinfo *drvinfo);
+void mlx5e_ethtool_get_strings(struct mlx5e_priv *priv,
+ uint32_t stringset, uint8_t *data);
+int mlx5e_ethtool_get_sset_count(struct mlx5e_priv *priv, int sset);
+void mlx5e_ethtool_get_ethtool_stats(struct mlx5e_priv *priv,
+ struct ethtool_stats *stats, u64 *data);
+void mlx5e_ethtool_get_ringparam(struct mlx5e_priv *priv,
+ struct ethtool_ringparam *param);
+int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv,
+ struct ethtool_ringparam *param);
+void mlx5e_ethtool_get_channels(struct mlx5e_priv *priv,
+ struct ethtool_channels *ch);
+int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
+ struct ethtool_channels *ch);
+int mlx5e_ethtool_get_coalesce(struct mlx5e_priv *priv,
+ struct ethtool_coalesce *coal);
+int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
+ struct ethtool_coalesce *coal);
+int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv,
+ struct ethtool_ts_info *info);
+int mlx5e_ethtool_flash_device(struct mlx5e_priv *priv,
+ struct ethtool_flash *flash);
+
/* mlx5e generic netdev management API */
struct net_device*
mlx5e_create_netdev(struct mlx5_core_dev *mdev, const struct mlx5e_profile *profile,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
new file mode 100644
index 000000000000..bac5103efad3
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
@@ -0,0 +1,461 @@
+/*
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <crypto/internal/geniv.h>
+#include <crypto/aead.h>
+#include <linux/inetdevice.h>
+#include <linux/netdevice.h>
+#include <linux/module.h>
+
+#include "en.h"
+#include "accel/ipsec.h"
+#include "en_accel/ipsec.h"
+#include "en_accel/ipsec_rxtx.h"
+
+struct mlx5e_ipsec_sa_entry {
+ struct hlist_node hlist; /* Item in SADB_RX hashtable */
+ unsigned int handle; /* Handle in SADB_RX */
+ struct xfrm_state *x;
+ struct mlx5e_ipsec *ipsec;
+ void *context;
+};
+
+struct xfrm_state *mlx5e_ipsec_sadb_rx_lookup(struct mlx5e_ipsec *ipsec,
+ unsigned int handle)
+{
+ struct mlx5e_ipsec_sa_entry *sa_entry;
+ struct xfrm_state *ret = NULL;
+
+ rcu_read_lock();
+ hash_for_each_possible_rcu(ipsec->sadb_rx, sa_entry, hlist, handle)
+ if (sa_entry->handle == handle) {
+ ret = sa_entry->x;
+ xfrm_state_hold(ret);
+ break;
+ }
+ rcu_read_unlock();
+
+ return ret;
+}
+
+static int mlx5e_ipsec_sadb_rx_add(struct mlx5e_ipsec_sa_entry *sa_entry)
+{
+ struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
+ unsigned long flags;
+ int ret;
+
+ /* ida_simple_get() may sleep with GFP_KERNEL, so allocate the
+ * handle before taking the spinlock
+ */
+ ret = ida_simple_get(&ipsec->halloc, 1, 0, GFP_KERNEL);
+ if (ret < 0)
+ return ret;
+
+ sa_entry->handle = ret;
+
+ spin_lock_irqsave(&ipsec->sadb_rx_lock, flags);
+ hash_add_rcu(ipsec->sadb_rx, &sa_entry->hlist, sa_entry->handle);
+ spin_unlock_irqrestore(&ipsec->sadb_rx_lock, flags);
+
+ return 0;
+}
+
+static void mlx5e_ipsec_sadb_rx_del(struct mlx5e_ipsec_sa_entry *sa_entry)
+{
+ struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ipsec->sadb_rx_lock, flags);
+ hash_del_rcu(&sa_entry->hlist);
+ spin_unlock_irqrestore(&ipsec->sadb_rx_lock, flags);
+}
+
+static void mlx5e_ipsec_sadb_rx_free(struct mlx5e_ipsec_sa_entry *sa_entry)
+{
+ struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
+ unsigned long flags;
+
+ /* Wait for the hash_del_rcu() call in sadb_rx_del() to take effect on the data path */
+ synchronize_rcu();
+ spin_lock_irqsave(&ipsec->sadb_rx_lock, flags);
+ ida_simple_remove(&ipsec->halloc, sa_entry->handle);
+ spin_unlock_irqrestore(&ipsec->sadb_rx_lock, flags);
+}
+
+static enum mlx5_accel_ipsec_enc_mode mlx5e_ipsec_enc_mode(struct xfrm_state *x)
+{
+ unsigned int key_len = (x->aead->alg_key_len + 7) / 8 - 4;
+
+ switch (key_len) {
+ case 16:
+ return MLX5_IPSEC_SADB_MODE_AES_GCM_128_AUTH_128;
+ case 32:
+ return MLX5_IPSEC_SADB_MODE_AES_GCM_256_AUTH_128;
+ default:
+ netdev_warn(x->xso.dev, "Bad key len: %d for alg %s\n",
+ key_len, x->aead->alg_name);
+ return -1;
+ }
+}
+
+static void mlx5e_ipsec_build_hw_sa(u32 op, struct mlx5e_ipsec_sa_entry *sa_entry,
+ struct mlx5_accel_ipsec_sa *hw_sa)
+{
+ struct xfrm_state *x = sa_entry->x;
+ struct aead_geniv_ctx *geniv_ctx;
+ unsigned int crypto_data_len;
+ struct crypto_aead *aead;
+ unsigned int key_len;
+ int ivsize;
+
+ memset(hw_sa, 0, sizeof(*hw_sa));
+
+ if (op == MLX5_IPSEC_CMD_ADD_SA) {
+ crypto_data_len = (x->aead->alg_key_len + 7) / 8;
+ key_len = crypto_data_len - 4; /* 4 bytes salt at end */
+ aead = x->data;
+ geniv_ctx = crypto_aead_ctx(aead);
+ ivsize = crypto_aead_ivsize(aead);
+
+ memcpy(&hw_sa->key_enc, x->aead->alg_key, key_len);
+ /* Duplicate 128 bit key twice according to HW layout */
+ if (key_len == 16)
+ memcpy(&hw_sa->key_enc[16], x->aead->alg_key, key_len);
+ memcpy(&hw_sa->gcm.salt_iv, geniv_ctx->salt, ivsize);
+ hw_sa->gcm.salt = *((__be32 *)(x->aead->alg_key + key_len));
+ }
+
+ hw_sa->cmd = htonl(op);
+ hw_sa->flags |= MLX5_IPSEC_SADB_SA_VALID | MLX5_IPSEC_SADB_SPI_EN;
+ if (x->props.family == AF_INET) {
+ hw_sa->sip[3] = x->props.saddr.a4;
+ hw_sa->dip[3] = x->id.daddr.a4;
+ hw_sa->sip_masklen = 32;
+ hw_sa->dip_masklen = 32;
+ } else {
+ memcpy(hw_sa->sip, x->props.saddr.a6, sizeof(hw_sa->sip));
+ memcpy(hw_sa->dip, x->id.daddr.a6, sizeof(hw_sa->dip));
+ hw_sa->sip_masklen = 128;
+ hw_sa->dip_masklen = 128;
+ hw_sa->flags |= MLX5_IPSEC_SADB_IPV6;
+ }
+ hw_sa->spi = x->id.spi;
+ hw_sa->sw_sa_handle = htonl(sa_entry->handle);
+ switch (x->id.proto) {
+ case IPPROTO_ESP:
+ hw_sa->flags |= MLX5_IPSEC_SADB_IP_ESP;
+ break;
+ case IPPROTO_AH:
+ hw_sa->flags |= MLX5_IPSEC_SADB_IP_AH;
+ break;
+ default:
+ break;
+ }
+ hw_sa->enc_mode = mlx5e_ipsec_enc_mode(x);
+ if (!(x->xso.flags & XFRM_OFFLOAD_INBOUND))
+ hw_sa->flags |= MLX5_IPSEC_SADB_DIR_SX;
+}
+
+static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x)
+{
+ struct net_device *netdev = x->xso.dev;
+ struct mlx5e_priv *priv;
+
+ priv = netdev_priv(netdev);
+
+ if (x->props.aalgo != SADB_AALG_NONE) {
+ netdev_info(netdev, "Cannot offload authenticated xfrm states\n");
+ return -EINVAL;
+ }
+ if (x->props.ealgo != SADB_X_EALG_AES_GCM_ICV16) {
+ netdev_info(netdev, "Only AES-GCM-ICV16 xfrm state may be offloaded\n");
+ return -EINVAL;
+ }
+ if (x->props.calgo != SADB_X_CALG_NONE) {
+ netdev_info(netdev, "Cannot offload compressed xfrm states\n");
+ return -EINVAL;
+ }
+ if (x->props.flags & XFRM_STATE_ESN) {
+ netdev_info(netdev, "Cannot offload ESN xfrm states\n");
+ return -EINVAL;
+ }
+ if (x->props.family != AF_INET &&
+ x->props.family != AF_INET6) {
+ netdev_info(netdev, "Only IPv4/6 xfrm states may be offloaded\n");
+ return -EINVAL;
+ }
+ if (x->props.mode != XFRM_MODE_TRANSPORT &&
+ x->props.mode != XFRM_MODE_TUNNEL) {
+ netdev_info(netdev, "Only transport and tunnel xfrm states may be offloaded\n");
+ return -EINVAL;
+ }
+ if (x->id.proto != IPPROTO_ESP) {
+ netdev_info(netdev, "Only ESP xfrm state may be offloaded\n");
+ return -EINVAL;
+ }
+ if (x->encap) {
+ netdev_info(netdev, "Encapsulated xfrm state may not be offloaded\n");
+ return -EINVAL;
+ }
+ if (!x->aead) {
+ netdev_info(netdev, "Cannot offload xfrm states without aead\n");
+ return -EINVAL;
+ }
+ if (x->aead->alg_icv_len != 128) {
+ netdev_info(netdev, "Cannot offload xfrm states with AEAD ICV length other than 128bit\n");
+ return -EINVAL;
+ }
+ if ((x->aead->alg_key_len != 128 + 32) &&
+ (x->aead->alg_key_len != 256 + 32)) {
+ netdev_info(netdev, "Cannot offload xfrm states with AEAD key length other than 128/256 bit\n");
+ return -EINVAL;
+ }
+ if (x->tfcpad) {
+ netdev_info(netdev, "Cannot offload xfrm states with tfc padding\n");
+ return -EINVAL;
+ }
+ if (!x->geniv) {
+ netdev_info(netdev, "Cannot offload xfrm states without geniv\n");
+ return -EINVAL;
+ }
+ if (strcmp(x->geniv, "seqiv")) {
+ netdev_info(netdev, "Cannot offload xfrm states with geniv other than seqiv\n");
+ return -EINVAL;
+ }
+ if (x->props.family == AF_INET6 &&
+ !(mlx5_accel_ipsec_device_caps(priv->mdev) & MLX5_ACCEL_IPSEC_IPV6)) {
+ netdev_info(netdev, "IPv6 xfrm state offload is not supported by this device\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int mlx5e_xfrm_add_state(struct xfrm_state *x)
+{
+ struct mlx5e_ipsec_sa_entry *sa_entry = NULL;
+ struct net_device *netdev = x->xso.dev;
+ struct mlx5_accel_ipsec_sa hw_sa;
+ struct mlx5e_priv *priv;
+ void *context;
+ int err;
+
+ priv = netdev_priv(netdev);
+
+ err = mlx5e_xfrm_validate_state(x);
+ if (err)
+ return err;
+
+ sa_entry = kzalloc(sizeof(*sa_entry), GFP_KERNEL);
+ if (!sa_entry) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ sa_entry->x = x;
+ sa_entry->ipsec = priv->ipsec;
+
+ /* Add the SA to the SADB_RX table first, so that incoming packets
+ * processed by the hardware before the add-SA completion arrives
+ * can still be matched
+ */
+ if (x->xso.flags & XFRM_OFFLOAD_INBOUND) {
+ err = mlx5e_ipsec_sadb_rx_add(sa_entry);
+ if (err) {
+ netdev_info(netdev, "Failed adding to SADB_RX: %d\n", err);
+ goto err_entry;
+ }
+ }
+
+ mlx5e_ipsec_build_hw_sa(MLX5_IPSEC_CMD_ADD_SA, sa_entry, &hw_sa);
+ context = mlx5_accel_ipsec_sa_cmd_exec(sa_entry->ipsec->en_priv->mdev, &hw_sa);
+ if (IS_ERR(context)) {
+ err = PTR_ERR(context);
+ goto err_sadb_rx;
+ }
+
+ err = mlx5_accel_ipsec_sa_cmd_wait(context);
+ if (err)
+ goto err_sadb_rx;
+
+ x->xso.offload_handle = (unsigned long)sa_entry;
+ goto out;
+
+err_sadb_rx:
+ if (x->xso.flags & XFRM_OFFLOAD_INBOUND) {
+ mlx5e_ipsec_sadb_rx_del(sa_entry);
+ mlx5e_ipsec_sadb_rx_free(sa_entry);
+ }
+err_entry:
+ kfree(sa_entry);
+out:
+ return err;
+}
+
+static void mlx5e_xfrm_del_state(struct xfrm_state *x)
+{
+ struct mlx5e_ipsec_sa_entry *sa_entry;
+ struct mlx5_accel_ipsec_sa hw_sa;
+ void *context;
+
+ if (!x->xso.offload_handle)
+ return;
+
+ sa_entry = (struct mlx5e_ipsec_sa_entry *)x->xso.offload_handle;
+ WARN_ON(sa_entry->x != x);
+
+ if (x->xso.flags & XFRM_OFFLOAD_INBOUND)
+ mlx5e_ipsec_sadb_rx_del(sa_entry);
+
+ mlx5e_ipsec_build_hw_sa(MLX5_IPSEC_CMD_DEL_SA, sa_entry, &hw_sa);
+ context = mlx5_accel_ipsec_sa_cmd_exec(sa_entry->ipsec->en_priv->mdev, &hw_sa);
+ if (IS_ERR(context))
+ return;
+
+ sa_entry->context = context;
+}
+
+static void mlx5e_xfrm_free_state(struct xfrm_state *x)
+{
+ struct mlx5e_ipsec_sa_entry *sa_entry;
+ int res;
+
+ if (!x->xso.offload_handle)
+ return;
+
+ sa_entry = (struct mlx5e_ipsec_sa_entry *)x->xso.offload_handle;
+ WARN_ON(sa_entry->x != x);
+
+ res = mlx5_accel_ipsec_sa_cmd_wait(sa_entry->context);
+ sa_entry->context = NULL;
+ if (res) {
+ /* Leftover object will leak */
+ return;
+ }
+
+ if (x->xso.flags & XFRM_OFFLOAD_INBOUND)
+ mlx5e_ipsec_sadb_rx_free(sa_entry);
+
+ kfree(sa_entry);
+}
+
+int mlx5e_ipsec_init(struct mlx5e_priv *priv)
+{
+ struct mlx5e_ipsec *ipsec = NULL;
+
+ if (!MLX5_IPSEC_DEV(priv->mdev)) {
+ netdev_dbg(priv->netdev, "Not an IPSec offload device\n");
+ return 0;
+ }
+
+ ipsec = kzalloc(sizeof(*ipsec), GFP_KERNEL);
+ if (!ipsec)
+ return -ENOMEM;
+
+ hash_init(ipsec->sadb_rx);
+ spin_lock_init(&ipsec->sadb_rx_lock);
+ ida_init(&ipsec->halloc);
+ ipsec->en_priv = priv;
+ ipsec->en_priv->ipsec = ipsec;
+ netdev_dbg(priv->netdev, "IPSec attached to netdevice\n");
+ return 0;
+}
+
+void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv)
+{
+ struct mlx5e_ipsec *ipsec = priv->ipsec;
+
+ if (!ipsec)
+ return;
+
+ ida_destroy(&ipsec->halloc);
+ kfree(ipsec);
+ priv->ipsec = NULL;
+}
+
+static bool mlx5e_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
+{
+ if (x->props.family == AF_INET) {
+ /* Offload with IPv4 options is not supported yet */
+ if (ip_hdr(skb)->ihl > 5)
+ return false;
+ } else {
+ /* Offload with IPv6 extension headers is not supported yet */
+ if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
+ return false;
+ }
+
+ return true;
+}
+
+static const struct xfrmdev_ops mlx5e_ipsec_xfrmdev_ops = {
+ .xdo_dev_state_add = mlx5e_xfrm_add_state,
+ .xdo_dev_state_delete = mlx5e_xfrm_del_state,
+ .xdo_dev_state_free = mlx5e_xfrm_free_state,
+ .xdo_dev_offload_ok = mlx5e_ipsec_offload_ok,
+};
+
+void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct net_device *netdev = priv->netdev;
+
+ if (!priv->ipsec)
+ return;
+
+ if (!(mlx5_accel_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_ESP) ||
+ !MLX5_CAP_ETH(mdev, swp)) {
+ mlx5_core_dbg(mdev, "mlx5e: ESP and SWP offload not supported\n");
+ return;
+ }
+
+ mlx5_core_info(mdev, "mlx5e: IPSec ESP acceleration enabled\n");
+ netdev->xfrmdev_ops = &mlx5e_ipsec_xfrmdev_ops;
+ netdev->features |= NETIF_F_HW_ESP;
+ netdev->hw_enc_features |= NETIF_F_HW_ESP;
+
+ if (!MLX5_CAP_ETH(mdev, swp_csum)) {
+ mlx5_core_dbg(mdev, "mlx5e: SWP checksum not supported\n");
+ return;
+ }
+
+ netdev->features |= NETIF_F_HW_ESP_TX_CSUM;
+ netdev->hw_enc_features |= NETIF_F_HW_ESP_TX_CSUM;
+
+ if (!(mlx5_accel_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_LSO) ||
+ !MLX5_CAP_ETH(mdev, swp_lso)) {
+ mlx5_core_dbg(mdev, "mlx5e: ESP LSO not supported\n");
+ return;
+ }
+
+ mlx5_core_dbg(mdev, "mlx5e: ESP GSO capability turned on\n");
+ netdev->features |= NETIF_F_GSO_ESP;
+ netdev->hw_features |= NETIF_F_GSO_ESP;
+ netdev->hw_enc_features |= NETIF_F_GSO_ESP;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
new file mode 100644
index 000000000000..56e00baf16cc
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
@@ -0,0 +1,140 @@
+/*
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef __MLX5E_IPSEC_H__
+#define __MLX5E_IPSEC_H__
+
+#ifdef CONFIG_MLX5_EN_IPSEC
+
+#include <linux/mlx5/device.h>
+#include <net/xfrm.h>
+#include <linux/idr.h>
+
+#define MLX5E_IPSEC_SADB_RX_BITS 10
+#define MLX5E_METADATA_ETHER_TYPE (0x8CE4)
+#define MLX5E_METADATA_ETHER_LEN 8
+
+struct mlx5e_priv;
+
+struct mlx5e_ipsec_sw_stats {
+ atomic64_t ipsec_rx_drop_sp_alloc;
+ atomic64_t ipsec_rx_drop_sadb_miss;
+ atomic64_t ipsec_rx_drop_syndrome;
+ atomic64_t ipsec_tx_drop_bundle;
+ atomic64_t ipsec_tx_drop_no_state;
+ atomic64_t ipsec_tx_drop_not_ip;
+ atomic64_t ipsec_tx_drop_trailer;
+ atomic64_t ipsec_tx_drop_metadata;
+};
+
+struct mlx5e_ipsec_stats {
+ u64 ipsec_dec_in_packets;
+ u64 ipsec_dec_out_packets;
+ u64 ipsec_dec_bypass_packets;
+ u64 ipsec_enc_in_packets;
+ u64 ipsec_enc_out_packets;
+ u64 ipsec_enc_bypass_packets;
+ u64 ipsec_dec_drop_packets;
+ u64 ipsec_dec_auth_fail_packets;
+ u64 ipsec_enc_drop_packets;
+ u64 ipsec_add_sa_success;
+ u64 ipsec_add_sa_fail;
+ u64 ipsec_del_sa_success;
+ u64 ipsec_del_sa_fail;
+ u64 ipsec_cmd_drop;
+};
+
+struct mlx5e_ipsec {
+ struct mlx5e_priv *en_priv;
+ DECLARE_HASHTABLE(sadb_rx, MLX5E_IPSEC_SADB_RX_BITS);
+ spinlock_t sadb_rx_lock; /* Protects sadb_rx and halloc */
+ struct ida halloc;
+ struct mlx5e_ipsec_sw_stats sw_stats;
+ struct mlx5e_ipsec_stats stats;
+};
+
+void mlx5e_ipsec_build_inverse_table(void);
+int mlx5e_ipsec_init(struct mlx5e_priv *priv);
+void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv);
+void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv);
+
+int mlx5e_ipsec_get_count(struct mlx5e_priv *priv);
+int mlx5e_ipsec_get_strings(struct mlx5e_priv *priv, uint8_t *data);
+void mlx5e_ipsec_update_stats(struct mlx5e_priv *priv);
+int mlx5e_ipsec_get_stats(struct mlx5e_priv *priv, u64 *data);
+
+struct xfrm_state *mlx5e_ipsec_sadb_rx_lookup(struct mlx5e_ipsec *dev,
+ unsigned int handle);
+
+#else
+
+static inline void mlx5e_ipsec_build_inverse_table(void)
+{
+}
+
+static inline int mlx5e_ipsec_init(struct mlx5e_priv *priv)
+{
+ return 0;
+}
+
+static inline void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv)
+{
+}
+
+static inline void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv)
+{
+}
+
+static inline int mlx5e_ipsec_get_count(struct mlx5e_priv *priv)
+{
+ return 0;
+}
+
+static inline int mlx5e_ipsec_get_strings(struct mlx5e_priv *priv,
+ uint8_t *data)
+{
+ return 0;
+}
+
+static inline void mlx5e_ipsec_update_stats(struct mlx5e_priv *priv)
+{
+}
+
+static inline int mlx5e_ipsec_get_stats(struct mlx5e_priv *priv, u64 *data)
+{
+ return 0;
+}
+
+#endif
+
+#endif /* __MLX5E_IPSEC_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
new file mode 100644
index 000000000000..4a78aefdf157
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
@@ -0,0 +1,378 @@
+/*
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <crypto/aead.h>
+#include <net/xfrm.h>
+#include <net/esp.h>
+
+#include "en_accel/ipsec_rxtx.h"
+#include "en_accel/ipsec.h"
+#include "en.h"
+
+enum {
+ MLX5E_IPSEC_RX_SYNDROME_DECRYPTED = 0x11,
+ MLX5E_IPSEC_RX_SYNDROME_AUTH_FAILED = 0x12,
+};
+
+struct mlx5e_ipsec_rx_metadata {
+ unsigned char reserved;
+ __be32 sa_handle;
+} __packed;
+
+enum {
+ MLX5E_IPSEC_TX_SYNDROME_OFFLOAD = 0x8,
+ MLX5E_IPSEC_TX_SYNDROME_OFFLOAD_WITH_LSO_TCP = 0x9,
+};
+
+struct mlx5e_ipsec_tx_metadata {
+ __be16 mss_inv; /* 1/MSS in 16-bit fixed point, only for LSO */
+ __be16 seq; /* LSBs of the first TCP seq, only for LSO */
+ u8 esp_next_proto; /* Next protocol of ESP */
+} __packed;
+
+struct mlx5e_ipsec_metadata {
+ unsigned char syndrome;
+ union {
+ unsigned char raw[5];
+ /* from FPGA to host, on successful decrypt */
+ struct mlx5e_ipsec_rx_metadata rx;
+ /* from host to FPGA */
+ struct mlx5e_ipsec_tx_metadata tx;
+ } __packed content;
+ /* packet type ID field */
+ __be16 ethertype;
+} __packed;
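+
+/* The metadata header above is 8 bytes in total when packed
+ * (1-byte syndrome + 5-byte content union + 2-byte ethertype),
+ * matching MLX5E_METADATA_ETHER_LEN in en_accel/ipsec.h
+ */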
+
+#define MAX_LSO_MSS 2048
+
+/* Pre-calculated (Q0.16) fixed-point inverse 1/x function */
+static __be16 mlx5e_ipsec_inverse_table[MAX_LSO_MSS];
+
+static inline __be16 mlx5e_ipsec_mss_inv(struct sk_buff *skb)
+{
+ return mlx5e_ipsec_inverse_table[skb_shinfo(skb)->gso_size];
+}
+
+static struct mlx5e_ipsec_metadata *mlx5e_ipsec_add_metadata(struct sk_buff *skb)
+{
+ struct mlx5e_ipsec_metadata *mdata;
+ struct ethhdr *eth;
+
+ if (unlikely(skb_cow_head(skb, sizeof(*mdata))))
+ return ERR_PTR(-ENOMEM);
+
+ eth = (struct ethhdr *)skb_push(skb, sizeof(*mdata));
+ skb->mac_header -= sizeof(*mdata);
+ mdata = (struct mlx5e_ipsec_metadata *)(eth + 1);
+
+ memmove(skb->data, skb->data + sizeof(*mdata),
+ 2 * ETH_ALEN);
+
+ eth->h_proto = cpu_to_be16(MLX5E_METADATA_ETHER_TYPE);
+
+ memset(mdata->content.raw, 0, sizeof(mdata->content.raw));
+ return mdata;
+}
+
+static int mlx5e_ipsec_remove_trailer(struct sk_buff *skb, struct xfrm_state *x)
+{
+ unsigned int alen = crypto_aead_authsize(x->data);
+ struct ipv6hdr *ipv6hdr = ipv6_hdr(skb);
+ struct iphdr *ipv4hdr = ip_hdr(skb);
+ unsigned int trailer_len;
+ u8 plen;
+ int ret;
+
+ ret = skb_copy_bits(skb, skb->len - alen - 2, &plen, 1);
+ if (unlikely(ret))
+ return ret;
+
+ trailer_len = alen + plen + 2;
+
+ pskb_trim(skb, skb->len - trailer_len);
+ if (skb->protocol == htons(ETH_P_IP)) {
+ ipv4hdr->tot_len = htons(ntohs(ipv4hdr->tot_len) - trailer_len);
+ ip_send_check(ipv4hdr);
+ } else {
+ ipv6hdr->payload_len = htons(ntohs(ipv6hdr->payload_len) -
+ trailer_len);
+ }
+ return 0;
+}
+
+static void mlx5e_ipsec_set_swp(struct sk_buff *skb,
+ struct mlx5_wqe_eth_seg *eseg, u8 mode,
+ struct xfrm_offload *xo)
+{
+ u8 proto;
+
+ /* Tunnel Mode:
+ * SWP: OutL3 InL3 InL4
+ * Pkt: MAC IP ESP IP L4
+ *
+ * Transport Mode:
+ * SWP: OutL3 InL4
+ * InL3
+ * Pkt: MAC IP ESP L4
+ *
+ * Offsets are in 2-byte words, counting from start of frame
+ */
+ eseg->swp_outer_l3_offset = skb_network_offset(skb) / 2;
+ if (skb->protocol == htons(ETH_P_IPV6))
+ eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L3_IPV6;
+
+ if (mode == XFRM_MODE_TUNNEL) {
+ eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
+ if (xo->proto == IPPROTO_IPV6) {
+ eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
+ proto = inner_ipv6_hdr(skb)->nexthdr;
+ } else {
+ proto = inner_ip_hdr(skb)->protocol;
+ }
+ } else {
+ eseg->swp_inner_l3_offset = skb_network_offset(skb) / 2;
+ if (skb->protocol == htons(ETH_P_IPV6))
+ eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
+ proto = xo->proto;
+ }
+ switch (proto) {
+ case IPPROTO_UDP:
+ eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
+ /* Fall through */
+ case IPPROTO_TCP:
+ eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
+ break;
+ }
+}
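+
+/* For instance (assuming a standard 14-byte Ethernet header): in
+ * transport mode over IPv4 with a 20-byte IP header, the outer and
+ * inner L3 offsets are both 14 / 2 = 7 words, and for TCP the inner
+ * L4 offset is (14 + 20) / 2 = 17 words
+ */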
+
+static void mlx5e_ipsec_set_iv(struct sk_buff *skb, struct xfrm_offload *xo)
+{
+ int iv_offset;
+ __be64 seqno;
+
+ /* Place the SN in the IV field */
+ seqno = cpu_to_be64(xo->seq.low + ((u64)xo->seq.hi << 32));
+ iv_offset = skb_transport_offset(skb) + sizeof(struct ip_esp_hdr);
+ skb_store_bits(skb, iv_offset, &seqno, 8);
+}
+
+static void mlx5e_ipsec_set_metadata(struct sk_buff *skb,
+ struct mlx5e_ipsec_metadata *mdata,
+ struct xfrm_offload *xo)
+{
+ struct ip_esp_hdr *esph;
+ struct tcphdr *tcph;
+
+ if (skb_is_gso(skb)) {
+ /* Add LSO metadata indication */
+ esph = ip_esp_hdr(skb);
+ tcph = inner_tcp_hdr(skb);
+ netdev_dbg(skb->dev, " Offloading GSO packet outer L3 %u; L4 %u; Inner L3 %u; L4 %u\n",
+ skb->network_header,
+ skb->transport_header,
+ skb->inner_network_header,
+ skb->inner_transport_header);
+ netdev_dbg(skb->dev, " Offloading GSO packet of len %u; mss %u; TCP sp %u dp %u seq 0x%x ESP seq 0x%x\n",
+ skb->len, skb_shinfo(skb)->gso_size,
+ ntohs(tcph->source), ntohs(tcph->dest),
+ ntohl(tcph->seq), ntohl(esph->seq_no));
+ mdata->syndrome = MLX5E_IPSEC_TX_SYNDROME_OFFLOAD_WITH_LSO_TCP;
+ mdata->content.tx.mss_inv = mlx5e_ipsec_mss_inv(skb);
+ mdata->content.tx.seq = htons(ntohl(tcph->seq) & 0xFFFF);
+ } else {
+ mdata->syndrome = MLX5E_IPSEC_TX_SYNDROME_OFFLOAD;
+ }
+ mdata->content.tx.esp_next_proto = xo->proto;
+
+ netdev_dbg(skb->dev, " TX metadata syndrome %u proto %u mss_inv %04x seq %04x\n",
+ mdata->syndrome, mdata->content.tx.esp_next_proto,
+ ntohs(mdata->content.tx.mss_inv),
+ ntohs(mdata->content.tx.seq));
+}
+
+struct sk_buff *mlx5e_ipsec_handle_tx_skb(struct net_device *netdev,
+ struct mlx5e_tx_wqe *wqe,
+ struct sk_buff *skb)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct xfrm_offload *xo = xfrm_offload(skb);
+ struct mlx5e_ipsec_metadata *mdata;
+ struct xfrm_state *x;
+
+ if (!xo)
+ return skb;
+
+ if (unlikely(skb->sp->len != 1)) {
+ atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_bundle);
+ goto drop;
+ }
+
+ x = xfrm_input_state(skb);
+ if (unlikely(!x)) {
+ atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_no_state);
+ goto drop;
+ }
+
+ if (unlikely(!x->xso.offload_handle ||
+ (skb->protocol != htons(ETH_P_IP) &&
+ skb->protocol != htons(ETH_P_IPV6)))) {
+ atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_not_ip);
+ goto drop;
+ }
+
+ if (!skb_is_gso(skb))
+ if (unlikely(mlx5e_ipsec_remove_trailer(skb, x))) {
+ atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_trailer);
+ goto drop;
+ }
+ mdata = mlx5e_ipsec_add_metadata(skb);
+ if (unlikely(IS_ERR(mdata))) {
+ atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_metadata);
+ goto drop;
+ }
+ mlx5e_ipsec_set_swp(skb, &wqe->eth, x->props.mode, xo);
+ mlx5e_ipsec_set_iv(skb, xo);
+ mlx5e_ipsec_set_metadata(skb, mdata, xo);
+
+ return skb;
+
+drop:
+ kfree_skb(skb);
+ return NULL;
+}
+
+static inline struct xfrm_state *
+mlx5e_ipsec_build_sp(struct net_device *netdev, struct sk_buff *skb,
+ struct mlx5e_ipsec_metadata *mdata)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct xfrm_offload *xo;
+ struct xfrm_state *xs;
+ u32 sa_handle;
+
+ skb->sp = secpath_dup(skb->sp);
+ if (unlikely(!skb->sp)) {
+ atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_sp_alloc);
+ return NULL;
+ }
+
+ sa_handle = be32_to_cpu(mdata->content.rx.sa_handle);
+ xs = mlx5e_ipsec_sadb_rx_lookup(priv->ipsec, sa_handle);
+ if (unlikely(!xs)) {
+ atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_sadb_miss);
+ return NULL;
+ }
+
+ skb->sp->xvec[skb->sp->len++] = xs;
+ skb->sp->olen++;
+
+ xo = xfrm_offload(skb);
+ xo->flags = CRYPTO_DONE;
+ switch (mdata->syndrome) {
+ case MLX5E_IPSEC_RX_SYNDROME_DECRYPTED:
+ xo->status = CRYPTO_SUCCESS;
+ break;
+ case MLX5E_IPSEC_RX_SYNDROME_AUTH_FAILED:
+ xo->status = CRYPTO_TUNNEL_ESP_AUTH_FAILED;
+ break;
+ default:
+ atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_syndrome);
+ return NULL;
+ }
+ return xs;
+}
+
+struct sk_buff *mlx5e_ipsec_handle_rx_skb(struct net_device *netdev,
+ struct sk_buff *skb)
+{
+ struct mlx5e_ipsec_metadata *mdata;
+ struct ethhdr *old_eth;
+ struct ethhdr *new_eth;
+ struct xfrm_state *xs;
+ __be16 *ethtype;
+
+ /* Detect inline metadata */
+ if (skb->len < ETH_HLEN + MLX5E_METADATA_ETHER_LEN)
+ return skb;
+ ethtype = (__be16 *)(skb->data + ETH_ALEN * 2);
+ if (*ethtype != cpu_to_be16(MLX5E_METADATA_ETHER_TYPE))
+ return skb;
+
+ /* Use the metadata */
+ mdata = (struct mlx5e_ipsec_metadata *)(skb->data + ETH_HLEN);
+ xs = mlx5e_ipsec_build_sp(netdev, skb, mdata);
+ if (unlikely(!xs)) {
+ kfree_skb(skb);
+ return NULL;
+ }
+
+ /* Remove the metadata from the buffer */
+ old_eth = (struct ethhdr *)skb->data;
+ new_eth = (struct ethhdr *)(skb->data + MLX5E_METADATA_ETHER_LEN);
+ memmove(new_eth, old_eth, 2 * ETH_ALEN);
+ /* Ethertype is already in its new place */
+ skb_pull_inline(skb, MLX5E_METADATA_ETHER_LEN);
+
+ return skb;
+}
+
+bool mlx5e_ipsec_feature_check(struct sk_buff *skb, struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct xfrm_state *x;
+
+ if (skb->sp && skb->sp->len) {
+ x = skb->sp->xvec[0];
+ if (x && x->xso.offload_handle)
+ return true;
+ }
+ return false;
+}
+
+void mlx5e_ipsec_build_inverse_table(void)
+{
+ u16 mss_inv;
+ u32 mss;
+
+ /* Calculate a 1/x inverse table for use in the GSO data path.
+ * Using this table, we provide the IPSec accelerator with the value of
+ * 1/gso_size so that it can infer the position of each segment inside
+ * the GSO packet, increment the ESP sequence number, and generate the
+ * IV. The HW expects this value in Q0.16 fixed-point number format.
+ */
+ mlx5e_ipsec_inverse_table[1] = htons(0xFFFF);
+ for (mss = 2; mss < MAX_LSO_MSS; mss++) {
+ mss_inv = ((1ULL << 32) / mss) >> 16;
+ mlx5e_ipsec_inverse_table[mss] = htons(mss_inv);
+ }
+}
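As a quick sanity check on the Q0.16 encoding above — a minimal stand-alone sketch assuming nothing beyond standard C, with the MSS value 1460 chosen purely for illustration:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t mss = 1460;
		/* Same computation as in mlx5e_ipsec_build_inverse_table() */
		uint16_t mss_inv = ((1ULL << 32) / mss) >> 16;

		/* Prints 0x002c (44): 44/65536 ~= 0.000671, vs 1/1460 ~= 0.000685;
		 * the truncation slightly underestimates the true inverse
		 */
		printf("1/%u in Q0.16 = 0x%04x\n", mss, mss_inv);
		return 0;
	}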
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
new file mode 100644
index 000000000000..e37ae2598dbb
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef __MLX5E_IPSEC_RXTX_H__
+#define __MLX5E_IPSEC_RXTX_H__
+
+#ifdef CONFIG_MLX5_EN_IPSEC
+
+#include <linux/skbuff.h>
+#include "en.h"
+
+struct sk_buff *mlx5e_ipsec_handle_rx_skb(struct net_device *netdev,
+ struct sk_buff *skb);
+void mlx5e_ipsec_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
+
+void mlx5e_ipsec_build_inverse_table(void);
+bool mlx5e_ipsec_feature_check(struct sk_buff *skb, struct net_device *netdev,
+ netdev_features_t features);
+struct sk_buff *mlx5e_ipsec_handle_tx_skb(struct net_device *netdev,
+ struct mlx5e_tx_wqe *wqe,
+ struct sk_buff *skb);
+
+#endif /* CONFIG_MLX5_EN_IPSEC */
+
+#endif /* __MLX5E_IPSEC_RXTX_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c
new file mode 100644
index 000000000000..6fea59223dc4
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c
@@ -0,0 +1,133 @@
+/*
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/ethtool.h>
+#include <net/sock.h>
+
+#include "en.h"
+#include "accel/ipsec.h"
+#include "fpga/sdk.h"
+#include "en_accel/ipsec.h"
+
+static const struct counter_desc mlx5e_ipsec_hw_stats_desc[] = {
+ { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_dec_in_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_dec_out_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_dec_bypass_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_enc_in_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_enc_out_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_enc_bypass_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_dec_drop_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_dec_auth_fail_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_enc_drop_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_add_sa_success) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_add_sa_fail) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_del_sa_success) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_del_sa_fail) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_cmd_drop) },
+};
+
+static const struct counter_desc mlx5e_ipsec_sw_stats_desc[] = {
+ { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_rx_drop_sp_alloc) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_rx_drop_sadb_miss) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_rx_drop_syndrome) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_tx_drop_bundle) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_tx_drop_no_state) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_tx_drop_not_ip) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_tx_drop_trailer) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_tx_drop_metadata) },
+};
+
+#define MLX5E_READ_CTR_ATOMIC64(ptr, dsc, i) \
+ atomic64_read((atomic64_t *)((char *)(ptr) + (dsc)[i].offset))
+
+#define NUM_IPSEC_HW_COUNTERS ARRAY_SIZE(mlx5e_ipsec_hw_stats_desc)
+#define NUM_IPSEC_SW_COUNTERS ARRAY_SIZE(mlx5e_ipsec_sw_stats_desc)
+
+#define NUM_IPSEC_COUNTERS (NUM_IPSEC_HW_COUNTERS + NUM_IPSEC_SW_COUNTERS)
+
+int mlx5e_ipsec_get_count(struct mlx5e_priv *priv)
+{
+ if (!priv->ipsec)
+ return 0;
+
+ return NUM_IPSEC_COUNTERS;
+}
+
+int mlx5e_ipsec_get_strings(struct mlx5e_priv *priv, uint8_t *data)
+{
+ unsigned int i, idx = 0;
+
+ if (!priv->ipsec)
+ return 0;
+
+ for (i = 0; i < NUM_IPSEC_HW_COUNTERS; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN,
+ mlx5e_ipsec_hw_stats_desc[i].format);
+
+ for (i = 0; i < NUM_IPSEC_SW_COUNTERS; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN,
+ mlx5e_ipsec_sw_stats_desc[i].format);
+
+ return NUM_IPSEC_COUNTERS;
+}
+
+void mlx5e_ipsec_update_stats(struct mlx5e_priv *priv)
+{
+ int ret;
+
+ if (!priv->ipsec)
+ return;
+
+ ret = mlx5_accel_ipsec_counters_read(priv->mdev, (u64 *)&priv->ipsec->stats,
+ NUM_IPSEC_HW_COUNTERS);
+ if (ret)
+ memset(&priv->ipsec->stats, 0, sizeof(priv->ipsec->stats));
+}
+
+int mlx5e_ipsec_get_stats(struct mlx5e_priv *priv, u64 *data)
+{
+ int i, idx = 0;
+
+ if (!priv->ipsec)
+ return 0;
+
+ for (i = 0; i < NUM_IPSEC_HW_COUNTERS; i++)
+ data[idx++] = MLX5E_READ_CTR64_CPU(&priv->ipsec->stats,
+ mlx5e_ipsec_hw_stats_desc, i);
+
+ for (i = 0; i < NUM_IPSEC_SW_COUNTERS; i++)
+ data[idx++] = MLX5E_READ_CTR_ATOMIC64(&priv->ipsec->sw_stats,
+ mlx5e_ipsec_sw_stats_desc, i);
+
+ return NUM_IPSEC_COUNTERS;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
index c8a005326e30..12d3ced61114 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
@@ -178,34 +178,26 @@ static int arfs_add_default_rule(struct mlx5e_priv *priv,
struct mlx5_flow_destination dest;
MLX5_DECLARE_FLOW_ACT(flow_act);
struct mlx5_flow_spec *spec;
+ enum mlx5e_traffic_types tt;
int err = 0;
- spec = mlx5_vzalloc(sizeof(*spec));
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec) {
- netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
err = -ENOMEM;
goto out;
}
dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
- switch (type) {
- case ARFS_IPV4_TCP:
- dest.tir_num = tir[MLX5E_TT_IPV4_TCP].tirn;
- break;
- case ARFS_IPV4_UDP:
- dest.tir_num = tir[MLX5E_TT_IPV4_UDP].tirn;
- break;
- case ARFS_IPV6_TCP:
- dest.tir_num = tir[MLX5E_TT_IPV6_TCP].tirn;
- break;
- case ARFS_IPV6_UDP:
- dest.tir_num = tir[MLX5E_TT_IPV6_UDP].tirn;
- break;
- default:
+ tt = arfs_get_tt(type);
+ if (tt == -EINVAL) {
+ netdev_err(priv->netdev, "%s: bad arfs_type: %d\n",
+ __func__, type);
err = -EINVAL;
goto out;
}
+ dest.tir_num = tir[tt].tirn;
+
arfs_t->default_rule = mlx5_add_flow_rules(arfs_t->ft.t, spec,
&flow_act,
&dest, 1);
@@ -237,7 +229,7 @@ static int arfs_create_groups(struct mlx5e_flow_table *ft,
ft->g = kcalloc(MLX5E_ARFS_NUM_GROUPS,
sizeof(*ft->g), GFP_KERNEL);
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in || !ft->g) {
kvfree(ft->g);
kvfree(in);
@@ -481,9 +473,8 @@ static struct mlx5_flow_handle *arfs_add_rule(struct mlx5e_priv *priv,
struct mlx5_flow_table *ft;
int err = 0;
- spec = mlx5_vzalloc(sizeof(*spec));
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec) {
- netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
err = -ENOMEM;
goto out;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
index e706a87fc8b2..66f432385dbb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
@@ -86,9 +86,8 @@ static void mlx5e_timestamp_overflow(struct work_struct *work)
schedule_delayed_work(&tstamp->overflow_work, tstamp->overflow_period);
}
-int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr)
+int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr)
{
- struct mlx5e_priv *priv = netdev_priv(dev);
struct hwtstamp_config config;
int err;
@@ -128,11 +127,12 @@ int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ case HWTSTAMP_FILTER_NTP_ALL:
/* Disable CQE compression */
- netdev_warn(dev, "Disabling cqe compression");
+ netdev_warn(priv->netdev, "Disabling cqe compression");
err = mlx5e_modify_rx_cqe_compression_locked(priv, false);
if (err) {
- netdev_err(dev, "Failed disabling cqe compression err=%d\n", err);
+ netdev_err(priv->netdev, "Failed disabling cqe compression err=%d\n", err);
mutex_unlock(&priv->state_lock);
return err;
}
@@ -150,9 +150,8 @@ int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr)
sizeof(config)) ? -EFAULT : 0;
}
-int mlx5e_hwstamp_get(struct net_device *dev, struct ifreq *ifr)
+int mlx5e_hwstamp_get(struct mlx5e_priv *priv, struct ifreq *ifr)
{
- struct mlx5e_priv *priv = netdev_priv(dev);
struct hwtstamp_config *cfg = &priv->tstamp.hwtstamp_config;
if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
index f1f17f7a3cd0..ece3fb147e3e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
@@ -65,7 +65,7 @@ static int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn,
u32 *in;
int err;
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -145,9 +145,8 @@ int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb)
int inlen;
void *in;
-
inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
goto out;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 16486dff1493..917fade5f5d5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -31,15 +31,15 @@
*/
#include "en.h"
+#include "en_accel/ipsec.h"
-static void mlx5e_get_drvinfo(struct net_device *dev,
- struct ethtool_drvinfo *drvinfo)
+void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv,
+ struct ethtool_drvinfo *drvinfo)
{
- struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5_core_dev *mdev = priv->mdev;
strlcpy(drvinfo->driver, DRIVER_NAME, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, DRIVER_VERSION " (" DRIVER_RELDATE ")",
+ strlcpy(drvinfo->version, DRIVER_VERSION,
sizeof(drvinfo->version));
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
"%d.%d.%04d (%.16s)",
@@ -49,6 +49,14 @@ static void mlx5e_get_drvinfo(struct net_device *dev,
sizeof(drvinfo->bus_info));
}
+static void mlx5e_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ mlx5e_ethtool_get_drvinfo(priv, drvinfo);
+}
+
struct ptys2ethtool_config {
__ETHTOOL_DECLARE_LINK_MODE_MASK(supported);
__ETHTOOL_DECLARE_LINK_MODE_MASK(advertised);
@@ -135,6 +143,9 @@ static unsigned long mlx5e_query_pfc_combined(struct mlx5e_priv *priv)
u8 pfc_en_rx;
int err;
+ if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
+ return 0;
+
err = mlx5_query_port_pfc(mdev, &pfc_en_tx, &pfc_en_rx);
return err ? 0 : pfc_en_tx | pfc_en_rx;
@@ -147,6 +158,9 @@ static bool mlx5e_query_global_pause_combined(struct mlx5e_priv *priv)
u32 tx_pause;
int err;
+ if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
+ return false;
+
err = mlx5_query_port_pause(mdev, &rx_pause, &tx_pause);
return err ? false : rx_pause | tx_pause;
@@ -160,9 +174,8 @@ static bool mlx5e_query_global_pause_combined(struct mlx5e_priv *priv)
((mlx5e_query_global_pause_combined(priv) + hweight8(mlx5e_query_pfc_combined(priv))) * \
NUM_PPORT_PER_PRIO_PFC_COUNTERS)
-static int mlx5e_get_sset_count(struct net_device *dev, int sset)
+int mlx5e_ethtool_get_sset_count(struct mlx5e_priv *priv, int sset)
{
- struct mlx5e_priv *priv = netdev_priv(dev);
switch (sset) {
case ETH_SS_STATS:
@@ -174,7 +187,8 @@ static int mlx5e_get_sset_count(struct net_device *dev, int sset)
MLX5E_NUM_SQ_STATS(priv) +
MLX5E_NUM_PFC_COUNTERS(priv) +
ARRAY_SIZE(mlx5e_pme_status_desc) +
- ARRAY_SIZE(mlx5e_pme_error_desc);
+ ARRAY_SIZE(mlx5e_pme_error_desc) +
+ mlx5e_ipsec_get_count(priv);
case ETH_SS_PRIV_FLAGS:
return ARRAY_SIZE(mlx5e_priv_flags);
@@ -186,6 +200,13 @@ static int mlx5e_get_sset_count(struct net_device *dev, int sset)
}
}
+static int mlx5e_get_sset_count(struct net_device *dev, int sset)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ return mlx5e_ethtool_get_sset_count(priv, sset);
+}
+
static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, uint8_t *data)
{
int i, j, tc, prio, idx = 0;
@@ -256,6 +277,9 @@ static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, uint8_t *data)
for (i = 0; i < ARRAY_SIZE(mlx5e_pme_error_desc); i++)
strcpy(data + (idx++) * ETH_GSTRING_LEN, mlx5e_pme_error_desc[i].format);
+ /* IPSec counters */
+ idx += mlx5e_ipsec_get_strings(priv, data + idx * ETH_GSTRING_LEN);
+
if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
return;
@@ -273,10 +297,9 @@ static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, uint8_t *data)
priv->channel_tc2txq[i][tc]);
}
-static void mlx5e_get_strings(struct net_device *dev,
- uint32_t stringset, uint8_t *data)
+void mlx5e_ethtool_get_strings(struct mlx5e_priv *priv,
+ uint32_t stringset, uint8_t *data)
{
- struct mlx5e_priv *priv = netdev_priv(dev);
int i;
switch (stringset) {
@@ -297,10 +320,17 @@ static void mlx5e_get_strings(struct net_device *dev,
}
}
-static void mlx5e_get_ethtool_stats(struct net_device *dev,
- struct ethtool_stats *stats, u64 *data)
+static void mlx5e_get_strings(struct net_device *dev,
+ uint32_t stringset, uint8_t *data)
{
struct mlx5e_priv *priv = netdev_priv(dev);
+
+ mlx5e_ethtool_get_strings(priv, stringset, data);
+}
+
+void mlx5e_ethtool_get_ethtool_stats(struct mlx5e_priv *priv,
+ struct ethtool_stats *stats, u64 *data)
+{
struct mlx5e_channels *channels;
struct mlx5_priv *mlx5_priv;
int i, j, tc, prio, idx = 0;
@@ -311,7 +341,7 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev,
mutex_lock(&priv->state_lock);
if (test_bit(MLX5E_STATE_OPENED, &priv->state))
- mlx5e_update_stats(priv);
+ mlx5e_update_stats(priv, true);
channels = &priv->channels;
mutex_unlock(&priv->state_lock);
@@ -378,6 +408,9 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev,
data[idx++] = MLX5E_READ_CTR64_CPU(mlx5_priv->pme_stats.error_counters,
mlx5e_pme_error_desc, i);
+ /* IPSec counters */
+ idx += mlx5e_ipsec_get_stats(priv, data + idx);
+
if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
return;
@@ -395,6 +428,15 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev,
sq_stats_desc, j);
}
+static void mlx5e_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats,
+ u64 *data)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ mlx5e_ethtool_get_ethtool_stats(priv, stats, data);
+}
+
static u32 mlx5e_rx_wqes_to_packets(struct mlx5e_priv *priv, int rq_wq_type,
int num_wqe)
{
@@ -439,10 +481,9 @@ static u32 mlx5e_packets_to_rx_wqes(struct mlx5e_priv *priv, int rq_wq_type,
return 1 << (order_base_2(num_wqes));
}
-static void mlx5e_get_ringparam(struct net_device *dev,
- struct ethtool_ringparam *param)
+void mlx5e_ethtool_get_ringparam(struct mlx5e_priv *priv,
+ struct ethtool_ringparam *param)
{
- struct mlx5e_priv *priv = netdev_priv(dev);
int rq_wq_type = priv->channels.params.rq_wq_type;
param->rx_max_pending = mlx5e_rx_wqes_to_packets(priv, rq_wq_type,
@@ -453,10 +494,17 @@ static void mlx5e_get_ringparam(struct net_device *dev,
param->tx_pending = 1 << priv->channels.params.log_sq_size;
}
-static int mlx5e_set_ringparam(struct net_device *dev,
- struct ethtool_ringparam *param)
+static void mlx5e_get_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *param)
{
struct mlx5e_priv *priv = netdev_priv(dev);
+
+ mlx5e_ethtool_get_ringparam(priv, param);
+}
+
+int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv,
+ struct ethtool_ringparam *param)
+{
int rq_wq_type = priv->channels.params.rq_wq_type;
struct mlx5e_channels new_channels = {};
u32 rx_pending_wqes;
@@ -468,12 +516,12 @@ static int mlx5e_set_ringparam(struct net_device *dev,
int err = 0;
if (param->rx_jumbo_pending) {
- netdev_info(dev, "%s: rx_jumbo_pending not supported\n",
+ netdev_info(priv->netdev, "%s: rx_jumbo_pending not supported\n",
__func__);
return -EINVAL;
}
if (param->rx_mini_pending) {
- netdev_info(dev, "%s: rx_mini_pending not supported\n",
+ netdev_info(priv->netdev, "%s: rx_mini_pending not supported\n",
__func__);
return -EINVAL;
}
@@ -486,13 +534,13 @@ static int mlx5e_set_ringparam(struct net_device *dev,
param->rx_pending);
if (param->rx_pending < min_rq_size) {
- netdev_info(dev, "%s: rx_pending (%d) < min (%d)\n",
+ netdev_info(priv->netdev, "%s: rx_pending (%d) < min (%d)\n",
__func__, param->rx_pending,
min_rq_size);
return -EINVAL;
}
if (param->rx_pending > max_rq_size) {
- netdev_info(dev, "%s: rx_pending (%d) > max (%d)\n",
+ netdev_info(priv->netdev, "%s: rx_pending (%d) > max (%d)\n",
__func__, param->rx_pending,
max_rq_size);
return -EINVAL;
@@ -501,19 +549,19 @@ static int mlx5e_set_ringparam(struct net_device *dev,
num_mtts = MLX5E_REQUIRED_MTTS(rx_pending_wqes);
if (priv->channels.params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ &&
!MLX5E_VALID_NUM_MTTS(num_mtts)) {
- netdev_info(dev, "%s: rx_pending (%d) request can't be satisfied, try to reduce.\n",
+ netdev_info(priv->netdev, "%s: rx_pending (%d) request can't be satisfied, try to reduce.\n",
__func__, param->rx_pending);
return -EINVAL;
}
if (param->tx_pending < (1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)) {
- netdev_info(dev, "%s: tx_pending (%d) < min (%d)\n",
+ netdev_info(priv->netdev, "%s: tx_pending (%d) < min (%d)\n",
__func__, param->tx_pending,
1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE);
return -EINVAL;
}
if (param->tx_pending > (1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE)) {
- netdev_info(dev, "%s: tx_pending (%d) > max (%d)\n",
+ netdev_info(priv->netdev, "%s: tx_pending (%d) > max (%d)\n",
__func__, param->tx_pending,
1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE);
return -EINVAL;
@@ -549,26 +597,39 @@ unlock:
return err;
}
-static void mlx5e_get_channels(struct net_device *dev,
- struct ethtool_channels *ch)
+static int mlx5e_set_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *param)
{
struct mlx5e_priv *priv = netdev_priv(dev);
+ return mlx5e_ethtool_set_ringparam(priv, param);
+}
+
+void mlx5e_ethtool_get_channels(struct mlx5e_priv *priv,
+ struct ethtool_channels *ch)
+{
ch->max_combined = priv->profile->max_nch(priv->mdev);
ch->combined_count = priv->channels.params.num_channels;
}
-static int mlx5e_set_channels(struct net_device *dev,
- struct ethtool_channels *ch)
+static void mlx5e_get_channels(struct net_device *dev,
+ struct ethtool_channels *ch)
{
struct mlx5e_priv *priv = netdev_priv(dev);
+
+ mlx5e_ethtool_get_channels(priv, ch);
+}
+
+int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
+ struct ethtool_channels *ch)
+{
unsigned int count = ch->combined_count;
struct mlx5e_channels new_channels = {};
bool arfs_enabled;
int err = 0;
if (!count) {
- netdev_info(dev, "%s: combined_count=0 not supported\n",
+ netdev_info(priv->netdev, "%s: combined_count=0 not supported\n",
__func__);
return -EINVAL;
}
@@ -593,7 +654,7 @@ static int mlx5e_set_channels(struct net_device *dev,
if (err)
goto out;
- arfs_enabled = dev->features & NETIF_F_NTUPLE;
+ arfs_enabled = priv->netdev->features & NETIF_F_NTUPLE;
if (arfs_enabled)
mlx5e_arfs_disable(priv);
@@ -603,7 +664,7 @@ static int mlx5e_set_channels(struct net_device *dev,
if (arfs_enabled) {
err = mlx5e_arfs_enable(priv);
if (err)
- netdev_err(dev, "%s: mlx5e_arfs_enable failed: %d\n",
+ netdev_err(priv->netdev, "%s: mlx5e_arfs_enable failed: %d\n",
__func__, err);
}
@@ -613,11 +674,17 @@ out:
return err;
}
-static int mlx5e_get_coalesce(struct net_device *netdev,
- struct ethtool_coalesce *coal)
+static int mlx5e_set_channels(struct net_device *dev,
+ struct ethtool_channels *ch)
{
- struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ return mlx5e_ethtool_set_channels(priv, ch);
+}
+int mlx5e_ethtool_get_coalesce(struct mlx5e_priv *priv,
+ struct ethtool_coalesce *coal)
+{
if (!MLX5_CAP_GEN(priv->mdev, cq_moderation))
return -EOPNOTSUPP;
@@ -630,6 +697,14 @@ static int mlx5e_get_coalesce(struct net_device *netdev,
return 0;
}
+static int mlx5e_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *coal)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ return mlx5e_ethtool_get_coalesce(priv, coal);
+}
+
static void
mlx5e_set_priv_channels_coalesce(struct mlx5e_priv *priv, struct ethtool_coalesce *coal)
{
@@ -653,10 +728,9 @@ mlx5e_set_priv_channels_coalesce(struct mlx5e_priv *priv, struct ethtool_coalesc
}
}
-static int mlx5e_set_coalesce(struct net_device *netdev,
- struct ethtool_coalesce *coal)
+int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
+ struct ethtool_coalesce *coal)
{
- struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5e_channels new_channels = {};
int err = 0;
@@ -699,6 +773,14 @@ out:
return err;
}
+static int mlx5e_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *coal)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ return mlx5e_ethtool_set_coalesce(priv, coal);
+}
+
static void ptys2ethtool_supported_link(unsigned long *supported_modes,
u32 eth_proto_cap)
{
@@ -723,24 +805,81 @@ static void ptys2ethtool_adver_link(unsigned long *advertising_modes,
__ETHTOOL_LINK_MODE_MASK_NBITS);
}
-static void ptys2ethtool_supported_port(struct ethtool_link_ksettings *link_ksettings,
- u32 eth_proto_cap)
+static void ptys2ethtool_supported_advertised_port(struct ethtool_link_ksettings *link_ksettings,
+ u32 eth_proto_cap,
+ u8 connector_type)
{
- if (eth_proto_cap & (MLX5E_PROT_MASK(MLX5E_10GBASE_CR)
- | MLX5E_PROT_MASK(MLX5E_10GBASE_SR)
- | MLX5E_PROT_MASK(MLX5E_40GBASE_CR4)
- | MLX5E_PROT_MASK(MLX5E_40GBASE_SR4)
- | MLX5E_PROT_MASK(MLX5E_100GBASE_SR4)
- | MLX5E_PROT_MASK(MLX5E_1000BASE_CX_SGMII))) {
- ethtool_link_ksettings_add_link_mode(link_ksettings, supported, FIBRE);
+ if (!connector_type || connector_type >= MLX5E_CONNECTOR_TYPE_NUMBER) {
+ if (eth_proto_cap & (MLX5E_PROT_MASK(MLX5E_10GBASE_CR)
+ | MLX5E_PROT_MASK(MLX5E_10GBASE_SR)
+ | MLX5E_PROT_MASK(MLX5E_40GBASE_CR4)
+ | MLX5E_PROT_MASK(MLX5E_40GBASE_SR4)
+ | MLX5E_PROT_MASK(MLX5E_100GBASE_SR4)
+ | MLX5E_PROT_MASK(MLX5E_1000BASE_CX_SGMII))) {
+ ethtool_link_ksettings_add_link_mode(link_ksettings,
+ supported,
+ FIBRE);
+ ethtool_link_ksettings_add_link_mode(link_ksettings,
+ advertising,
+ FIBRE);
+ }
+
+ if (eth_proto_cap & (MLX5E_PROT_MASK(MLX5E_100GBASE_KR4)
+ | MLX5E_PROT_MASK(MLX5E_40GBASE_KR4)
+ | MLX5E_PROT_MASK(MLX5E_10GBASE_KR)
+ | MLX5E_PROT_MASK(MLX5E_10GBASE_KX4)
+ | MLX5E_PROT_MASK(MLX5E_1000BASE_KX))) {
+ ethtool_link_ksettings_add_link_mode(link_ksettings,
+ supported,
+ Backplane);
+ ethtool_link_ksettings_add_link_mode(link_ksettings,
+ advertising,
+ Backplane);
+ }
+ return;
}
- if (eth_proto_cap & (MLX5E_PROT_MASK(MLX5E_100GBASE_KR4)
- | MLX5E_PROT_MASK(MLX5E_40GBASE_KR4)
- | MLX5E_PROT_MASK(MLX5E_10GBASE_KR)
- | MLX5E_PROT_MASK(MLX5E_10GBASE_KX4)
- | MLX5E_PROT_MASK(MLX5E_1000BASE_KX))) {
- ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Backplane);
+ switch (connector_type) {
+ case MLX5E_PORT_TP:
+ ethtool_link_ksettings_add_link_mode(link_ksettings,
+ supported, TP);
+ ethtool_link_ksettings_add_link_mode(link_ksettings,
+ advertising, TP);
+ break;
+ case MLX5E_PORT_AUI:
+ ethtool_link_ksettings_add_link_mode(link_ksettings,
+ supported, AUI);
+ ethtool_link_ksettings_add_link_mode(link_ksettings,
+ advertising, AUI);
+ break;
+ case MLX5E_PORT_BNC:
+ ethtool_link_ksettings_add_link_mode(link_ksettings,
+ supported, BNC);
+ ethtool_link_ksettings_add_link_mode(link_ksettings,
+ advertising, BNC);
+ break;
+ case MLX5E_PORT_MII:
+ ethtool_link_ksettings_add_link_mode(link_ksettings,
+ supported, MII);
+ ethtool_link_ksettings_add_link_mode(link_ksettings,
+ advertising, MII);
+ break;
+ case MLX5E_PORT_FIBRE:
+ ethtool_link_ksettings_add_link_mode(link_ksettings,
+ supported, FIBRE);
+ ethtool_link_ksettings_add_link_mode(link_ksettings,
+ advertising, FIBRE);
+ break;
+ case MLX5E_PORT_DA:
+ ethtool_link_ksettings_add_link_mode(link_ksettings,
+ supported, Backplane);
+ ethtool_link_ksettings_add_link_mode(link_ksettings,
+ advertising, Backplane);
+ break;
+ case MLX5E_PORT_NONE:
+ case MLX5E_PORT_OTHER:
+ default:
+ break;
}
}
@@ -791,7 +930,6 @@ static void get_supported(u32 eth_proto_cap,
{
unsigned long *supported = link_ksettings->link_modes.supported;
- ptys2ethtool_supported_port(link_ksettings, eth_proto_cap);
ptys2ethtool_supported_link(supported, eth_proto_cap);
ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Pause);
}
@@ -809,8 +947,23 @@ static void get_advertising(u32 eth_proto_cap, u8 tx_pause,
ethtool_link_ksettings_add_link_mode(link_ksettings, advertising, Asym_Pause);
}
-static u8 get_connector_port(u32 eth_proto)
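+/* Map connector_type values from the PTYS register to ethtool PORT_* codes */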
+static int ptys2connector_type[MLX5E_CONNECTOR_TYPE_NUMBER] = {
+ [MLX5E_PORT_UNKNOWN] = PORT_OTHER,
+ [MLX5E_PORT_NONE] = PORT_NONE,
+ [MLX5E_PORT_TP] = PORT_TP,
+ [MLX5E_PORT_AUI] = PORT_AUI,
+ [MLX5E_PORT_BNC] = PORT_BNC,
+ [MLX5E_PORT_MII] = PORT_MII,
+ [MLX5E_PORT_FIBRE] = PORT_FIBRE,
+ [MLX5E_PORT_DA] = PORT_DA,
+ [MLX5E_PORT_OTHER] = PORT_OTHER,
+ };
+
+static u8 get_connector_port(u32 eth_proto, u8 connector_type)
{
+ if (connector_type && connector_type < MLX5E_CONNECTOR_TYPE_NUMBER)
+ return ptys2connector_type[connector_type];
+
if (eth_proto & (MLX5E_PROT_MASK(MLX5E_10GBASE_SR)
| MLX5E_PROT_MASK(MLX5E_40GBASE_SR4)
| MLX5E_PROT_MASK(MLX5E_100GBASE_SR4)
@@ -856,6 +1009,7 @@ static int mlx5e_get_link_ksettings(struct net_device *netdev,
u32 eth_proto_oper;
u8 an_disable_admin;
u8 an_status;
+ u8 connector_type;
int err;
err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN, 1);
@@ -871,6 +1025,7 @@ static int mlx5e_get_link_ksettings(struct net_device *netdev,
eth_proto_lp = MLX5_GET(ptys_reg, out, eth_proto_lp_advertise);
an_disable_admin = MLX5_GET(ptys_reg, out, an_disable_admin);
an_status = MLX5_GET(ptys_reg, out, an_status);
+ connector_type = MLX5_GET(ptys_reg, out, connector_type);
mlx5_query_port_pause(mdev, &rx_pause, &tx_pause);
@@ -883,7 +1038,10 @@ static int mlx5e_get_link_ksettings(struct net_device *netdev,
eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
- link_ksettings->base.port = get_connector_port(eth_proto_oper);
+ link_ksettings->base.port = get_connector_port(eth_proto_oper,
+ connector_type);
+ ptys2ethtool_supported_advertised_port(link_ksettings, eth_proto_admin,
+ connector_type);
get_lp_advertising(eth_proto_lp, link_ksettings);
if (an_status == MLX5_AN_COMPLETE)
@@ -1048,7 +1206,7 @@ static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
(hfunc != ETH_RSS_HASH_TOP))
return -EINVAL;
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -1222,13 +1380,12 @@ static int mlx5e_set_pauseparam(struct net_device *netdev,
return err;
}
-static int mlx5e_get_ts_info(struct net_device *dev,
- struct ethtool_ts_info *info)
+int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv,
+ struct ethtool_ts_info *info)
{
- struct mlx5e_priv *priv = netdev_priv(dev);
int ret;
- ret = ethtool_op_get_ts_info(dev, info);
+ ret = ethtool_op_get_ts_info(priv->netdev, info);
if (ret)
return ret;
@@ -1251,6 +1408,14 @@ static int mlx5e_get_ts_info(struct net_device *dev,
return 0;
}
+static int mlx5e_get_ts_info(struct net_device *dev,
+ struct ethtool_ts_info *info)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ return mlx5e_ethtool_get_ts_info(priv, info);
+}
+
static __u32 mlx5e_get_wol_supported(struct mlx5_core_dev *mdev)
{
__u32 ret = 0;
@@ -1638,6 +1803,40 @@ static int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
return err;
}
+int mlx5e_ethtool_flash_device(struct mlx5e_priv *priv,
+ struct ethtool_flash *flash)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct net_device *dev = priv->netdev;
+ const struct firmware *fw;
+ int err;
+
+ if (flash->region != ETHTOOL_FLASH_ALL_REGIONS)
+ return -EOPNOTSUPP;
+
+ err = request_firmware_direct(&fw, flash->data, &dev->dev);
+ if (err)
+ return err;
+
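+ /* Flashing may take a while: hold the netdev so it cannot go away,
+ * and drop the RTNL lock so unrelated operations are not blocked.
+ */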
+ dev_hold(dev);
+ rtnl_unlock();
+
+ err = mlx5_firmware_flash(mdev, fw);
+ release_firmware(fw);
+
+ rtnl_lock();
+ dev_put(dev);
+ return err;
+}
+
+static int mlx5e_flash_device(struct net_device *dev,
+ struct ethtool_flash *flash)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ return mlx5e_ethtool_flash_device(priv, flash);
+}
+
const struct ethtool_ops mlx5e_ethtool_ops = {
.get_drvinfo = mlx5e_get_drvinfo,
.get_link = ethtool_op_get_link,
@@ -1658,6 +1857,7 @@ const struct ethtool_ops mlx5e_ethtool_ops = {
.set_rxfh = mlx5e_set_rxfh,
.get_rxnfc = mlx5e_get_rxnfc,
.set_rxnfc = mlx5e_set_rxnfc,
+ .flash_device = mlx5e_flash_device,
.get_tunable = mlx5e_get_tunable,
.set_tunable = mlx5e_set_tunable,
.get_pauseparam = mlx5e_get_pauseparam,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index 53ed58320a24..dfccb5305e9c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -170,7 +170,6 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
-
switch (rule_type) {
case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
rule_p = &priv->fs.vlan.untagged_rule;
@@ -218,11 +217,9 @@ static int mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
struct mlx5_flow_spec *spec;
int err = 0;
- spec = mlx5_vzalloc(sizeof(*spec));
- if (!spec) {
- netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
return -ENOMEM;
- }
if (rule_type == MLX5E_VLAN_RULE_TYPE_MATCH_VID)
mlx5e_vport_context_update_vlans(priv);
@@ -660,11 +657,9 @@ mlx5e_generate_ttc_rule(struct mlx5e_priv *priv,
struct mlx5_flow_spec *spec;
int err = 0;
- spec = mlx5_vzalloc(sizeof(*spec));
- if (!spec) {
- netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
return ERR_PTR(-ENOMEM);
- }
if (proto) {
spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
@@ -742,7 +737,7 @@ static int mlx5e_create_ttc_table_groups(struct mlx5e_ttc_table *ttc)
sizeof(*ft->g), GFP_KERNEL);
if (!ft->g)
return -ENOMEM;
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in) {
kfree(ft->g);
return -ENOMEM;
@@ -852,11 +847,9 @@ static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
u8 *mc_dmac;
u8 *mv_dmac;
- spec = mlx5_vzalloc(sizeof(*spec));
- if (!spec) {
- netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
return -ENOMEM;
- }
mc_dmac = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
outer_headers.dmac_47_16);
@@ -916,7 +909,7 @@ static int mlx5e_create_l2_table_groups(struct mlx5e_l2_table *l2_table)
ft->g = kcalloc(MLX5E_NUM_L2_GROUPS, sizeof(*ft->g), GFP_KERNEL);
if (!ft->g)
return -ENOMEM;
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in) {
kfree(ft->g);
return -ENOMEM;
@@ -1071,7 +1064,7 @@ static int mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft)
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
int err;
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
index 85bf4a389295..bdd82c9b3992 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
@@ -296,7 +296,7 @@ add_ethtool_flow_rule(struct mlx5e_priv *priv,
struct mlx5_flow_handle *rule;
int err = 0;
- spec = mlx5_vzalloc(sizeof(*spec));
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec)
return ERR_PTR(-ENOMEM);
err = set_flow_attrs(spec->match_criteria, spec->match_value,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 277f4de30375..1eac5003084f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -39,6 +39,9 @@
#include "en.h"
#include "en_tc.h"
#include "en_rep.h"
+#include "en_accel/ipsec.h"
+#include "en_accel/ipsec_rxtx.h"
+#include "accel/ipsec.h"
#include "vxlan.h"
struct mlx5e_rq_param {
@@ -96,9 +99,12 @@ void mlx5e_set_rq_type_params(struct mlx5_core_dev *mdev,
params->log_rq_size = is_kdump_kernel() ?
MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE :
MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
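+ /* XDP programs require the full XDP_PACKET_HEADROOM; NET_IP_ALIGN is
+ * added on top so the IP header lands on an aligned boundary.
+ */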
+ params->rq_headroom = params->xdp_prog ?
+ XDP_PACKET_HEADROOM : MLX5_RX_HEADROOM;
+ params->rq_headroom += NET_IP_ALIGN;
/* Extra room needed for build_skb */
- params->lro_wqe_sz -= MLX5_RX_HEADROOM +
+ params->lro_wqe_sz -= params->rq_headroom +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}
@@ -112,7 +118,7 @@ void mlx5e_set_rq_type_params(struct mlx5_core_dev *mdev,
static void mlx5e_set_rq_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
u8 rq_type = mlx5e_check_fragmented_striding_rq_cap(mdev) &&
- !params->xdp_prog ?
+ !params->xdp_prog && !MLX5_IPSEC_DEV(mdev) ?
MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ :
MLX5_WQ_TYPE_LINKED_LIST;
mlx5e_set_rq_type_params(mdev, params, rq_type);
@@ -124,7 +130,8 @@ static void mlx5e_update_carrier(struct mlx5e_priv *priv)
u8 port_state;
port_state = mlx5_query_vport_state(mdev,
- MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT, 0);
+ MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT,
+ 0);
if (port_state == VPORT_STATE_UP) {
netdev_info(priv->netdev, "Link up\n");
@@ -142,7 +149,8 @@ static void mlx5e_update_carrier_work(struct work_struct *work)
mutex_lock(&priv->state_lock);
if (test_bit(MLX5E_STATE_OPENED, &priv->state))
- mlx5e_update_carrier(priv);
+ if (priv->profile->update_carrier)
+ priv->profile->update_carrier(priv);
mutex_unlock(&priv->state_lock);
}
@@ -195,6 +203,7 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
s->rx_buff_alloc_err += rq_stats->buff_alloc_err;
s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks;
s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts;
+ s->rx_page_reuse += rq_stats->page_reuse;
s->rx_cache_reuse += rq_stats->cache_reuse;
s->rx_cache_full += rq_stats->cache_full;
s->rx_cache_empty += rq_stats->cache_empty;
@@ -243,18 +252,14 @@ static void mlx5e_update_vport_counters(struct mlx5e_priv *priv)
mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
}
-static void mlx5e_update_pport_counters(struct mlx5e_priv *priv)
+static void mlx5e_update_pport_counters(struct mlx5e_priv *priv, bool full)
{
struct mlx5e_pport_stats *pstats = &priv->stats.pport;
struct mlx5_core_dev *mdev = priv->mdev;
+ u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
int prio;
void *out;
- u32 *in;
-
- in = mlx5_vzalloc(sz);
- if (!in)
- goto free_out;
MLX5_SET(ppcnt_reg, in, local_port, 1);
@@ -262,6 +267,9 @@ static void mlx5e_update_pport_counters(struct mlx5e_priv *priv)
MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
+ if (!full)
+ return;
+
out = pstats->RFC_2863_counters;
MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
@@ -287,53 +295,57 @@ static void mlx5e_update_pport_counters(struct mlx5e_priv *priv)
mlx5_core_access_reg(mdev, in, sz, out, sz,
MLX5_REG_PPCNT, 0, 0);
}
-
-free_out:
- kvfree(in);
}
static void mlx5e_update_q_counter(struct mlx5e_priv *priv)
{
struct mlx5e_qcounter_stats *qcnt = &priv->stats.qcnt;
+ u32 out[MLX5_ST_SZ_DW(query_q_counter_out)];
+ int err;
if (!priv->q_counter)
return;
- mlx5_core_query_out_of_buffer(priv->mdev, priv->q_counter,
- &qcnt->rx_out_of_buffer);
+ err = mlx5_core_query_q_counter(priv->mdev, priv->q_counter, 0, out, sizeof(out));
+ if (err)
+ return;
+
+ qcnt->rx_out_of_buffer = MLX5_GET(query_q_counter_out, out, out_of_buffer);
}
static void mlx5e_update_pcie_counters(struct mlx5e_priv *priv)
{
struct mlx5e_pcie_stats *pcie_stats = &priv->stats.pcie;
struct mlx5_core_dev *mdev = priv->mdev;
+ u32 in[MLX5_ST_SZ_DW(mpcnt_reg)] = {0};
int sz = MLX5_ST_SZ_BYTES(mpcnt_reg);
void *out;
- u32 *in;
if (!MLX5_CAP_MCAM_FEATURE(mdev, pcie_performance_group))
return;
- in = mlx5_vzalloc(sz);
- if (!in)
- return;
-
out = pcie_stats->pcie_perf_counters;
MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP);
mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
-
- kvfree(in);
}
-void mlx5e_update_stats(struct mlx5e_priv *priv)
+void mlx5e_update_stats(struct mlx5e_priv *priv, bool full)
{
- mlx5e_update_pcie_counters(priv);
- mlx5e_update_pport_counters(priv);
+ if (full) {
+ mlx5e_update_pcie_counters(priv);
+ mlx5e_ipsec_update_stats(priv);
+ }
+ mlx5e_update_pport_counters(priv, full);
mlx5e_update_vport_counters(priv);
mlx5e_update_q_counter(priv);
mlx5e_update_sw_counters(priv);
}
+static void mlx5e_update_ndo_stats(struct mlx5e_priv *priv)
+{
+ mlx5e_update_stats(priv, false);
+}
+
void mlx5e_update_stats_work(struct work_struct *work)
{
struct delayed_work *dwork = to_delayed_work(work);
@@ -503,7 +515,7 @@ static int mlx5e_create_umr_mkey(struct mlx5_core_dev *mdev,
if (!MLX5E_VALID_NUM_MTTS(npages))
return -EINVAL;
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -544,7 +556,6 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
void *rqc = rqp->rqc;
void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
u32 byte_count;
- u32 frag_sz;
int npages;
int wq_sz;
int err;
@@ -576,13 +587,8 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
goto err_rq_wq_destroy;
}
- if (rq->xdp_prog) {
- rq->buff.map_dir = DMA_BIDIRECTIONAL;
- rq->rx_headroom = XDP_PACKET_HEADROOM;
- } else {
- rq->buff.map_dir = DMA_FROM_DEVICE;
- rq->rx_headroom = MLX5_RX_HEADROOM;
- }
+ rq->buff.map_dir = rq->xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
+ rq->rx_headroom = params->rq_headroom;
switch (rq->wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
@@ -591,6 +597,13 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;
rq->handle_rx_cqe = c->priv->profile->rx_handlers.handle_rx_cqe_mpwqe;
+#ifdef CONFIG_MLX5_EN_IPSEC
+ if (MLX5_IPSEC_DEV(mdev)) {
+ err = -EINVAL;
+ netdev_err(c->netdev, "MPWQE RQ with IPSec offload not supported\n");
+ goto err_rq_wq_destroy;
+ }
+#endif
if (!rq->handle_rx_cqe) {
err = -EINVAL;
netdev_err(c->netdev, "RX handler of MPWQE RQ is not set, err %d\n", err);
@@ -613,18 +626,24 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
goto err_destroy_umr_mkey;
break;
default: /* MLX5_WQ_TYPE_LINKED_LIST */
- rq->dma_info = kzalloc_node(wq_sz * sizeof(*rq->dma_info),
- GFP_KERNEL, cpu_to_node(c->cpu));
- if (!rq->dma_info) {
+ rq->wqe.frag_info =
+ kzalloc_node(wq_sz * sizeof(*rq->wqe.frag_info),
+ GFP_KERNEL, cpu_to_node(c->cpu));
+ if (!rq->wqe.frag_info) {
err = -ENOMEM;
goto err_rq_wq_destroy;
}
rq->alloc_wqe = mlx5e_alloc_rx_wqe;
rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;
- rq->handle_rx_cqe = c->priv->profile->rx_handlers.handle_rx_cqe;
+#ifdef CONFIG_MLX5_EN_IPSEC
+ if (c->priv->ipsec)
+ rq->handle_rx_cqe = mlx5e_ipsec_handle_rx_cqe;
+ else
+#endif
+ rq->handle_rx_cqe = c->priv->profile->rx_handlers.handle_rx_cqe;
if (!rq->handle_rx_cqe) {
- kfree(rq->dma_info);
+ kfree(rq->wqe.frag_info);
err = -EINVAL;
netdev_err(c->netdev, "RX handler of RQ is not set, err %d\n", err);
goto err_rq_wq_destroy;
@@ -632,16 +651,17 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
rq->buff.wqe_sz = params->lro_en ?
params->lro_wqe_sz :
- MLX5E_SW2HW_MTU(c->netdev->mtu);
+ MLX5E_SW2HW_MTU(c->priv, c->netdev->mtu);
+#ifdef CONFIG_MLX5_EN_IPSEC
+ if (MLX5_IPSEC_DEV(mdev))
+ rq->buff.wqe_sz += MLX5E_METADATA_ETHER_LEN;
+#endif
+ rq->wqe.page_reuse = !params->xdp_prog && !params->lro_en;
byte_count = rq->buff.wqe_sz;
/* calc the required page order */
- frag_sz = rq->rx_headroom +
- byte_count /* packet data */ +
- SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
- frag_sz = SKB_DATA_ALIGN(frag_sz);
-
- npages = DIV_ROUND_UP(frag_sz, PAGE_SIZE);
+ rq->wqe.frag_sz = MLX5_SKB_FRAG_SZ(rq->rx_headroom + byte_count);
+ npages = DIV_ROUND_UP(rq->wqe.frag_sz, PAGE_SIZE);
rq->buff.page_order = order_base_2(npages);
byte_count |= MLX5_HW_START_PADDING;
@@ -686,7 +706,7 @@ static void mlx5e_free_rq(struct mlx5e_rq *rq)
mlx5_core_destroy_mkey(rq->mdev, &rq->umr_mkey);
break;
default: /* MLX5_WQ_TYPE_LINKED_LIST */
- kfree(rq->dma_info);
+ kfree(rq->wqe.frag_info);
}
for (i = rq->page_cache.head; i != rq->page_cache.tail;
@@ -711,7 +731,7 @@ static int mlx5e_create_rq(struct mlx5e_rq *rq,
inlen = MLX5_ST_SZ_BYTES(create_rq_in) +
sizeof(u64) * rq->wq_ctrl.buf.npages;
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -748,7 +768,7 @@ static int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state,
int err;
inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -776,7 +796,7 @@ static int mlx5e_modify_rq_scatter_fcs(struct mlx5e_rq *rq, bool enable)
int err;
inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -805,7 +825,7 @@ static int mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd)
int err;
inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -868,6 +888,16 @@ static void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
mlx5_wq_ll_pop(&rq->wq, wqe_ix_be,
&wqe->next.next_wqe_index);
}
+
+ if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST && rq->wqe.page_reuse) {
+ /* Clean up outstanding pages on handled WQEs that opted for
+ * page-reuse but have not yet been re-posted.
+ */
+ int wq_sz = mlx5_wq_ll_get_size(&rq->wq);
+
+ for (wqe_ix = 0; wqe_ix < wq_sz; wqe_ix++)
+ rq->dealloc_wqe(rq, wqe_ix);
+ }
}
static int mlx5e_open_rq(struct mlx5e_channel *c,
@@ -1086,6 +1116,8 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
sq->uar_map = mdev->mlx5e_res.bfreg.map;
sq->max_inline = params->tx_max_inline;
sq->min_inline_mode = params->tx_min_inline_mode;
+ if (MLX5_IPSEC_DEV(c->priv->mdev))
+ set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state);
param->wq.db_numa_node = cpu_to_node(c->cpu);
err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl);
@@ -1134,7 +1166,7 @@ static int mlx5e_create_sq(struct mlx5_core_dev *mdev,
inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
sizeof(u64) * csp->wq_ctrl->buf.npages;
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -1182,7 +1214,7 @@ static int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn,
int err;
inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -1496,7 +1528,7 @@ static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
sizeof(u64) * cq->wq_ctrl.frag_buf.npages;
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -1905,6 +1937,7 @@ static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
mlx5e_build_sq_param_common(priv, param);
MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
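+ /* allow_swp permits software-parser (SWP) offsets in WQEs,
+ * which the IPSec offload datapath relies on.
+ */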
+ MLX5_SET(sqc, sqc, allow_swp, !!MLX5_IPSEC_DEV(priv->mdev));
}
static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
@@ -2091,7 +2124,7 @@ mlx5e_create_rqt(struct mlx5e_priv *priv, int sz, struct mlx5e_rqt *rqt)
int i;
inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -2210,7 +2243,7 @@ int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz,
int err;
inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + sizeof(u32) * sz;
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -2433,7 +2466,7 @@ static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv)
int ix;
inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -2465,7 +2498,7 @@ free_in:
static int mlx5e_set_mtu(struct mlx5e_priv *priv, u16 mtu)
{
struct mlx5_core_dev *mdev = priv->mdev;
- u16 hw_mtu = MLX5E_SW2HW_MTU(mtu);
+ u16 hw_mtu = MLX5E_SW2HW_MTU(priv, mtu);
int err;
err = mlx5_set_port_mtu(mdev, hw_mtu, 1);
@@ -2487,7 +2520,7 @@ static void mlx5e_query_mtu(struct mlx5e_priv *priv, u16 *mtu)
if (err || !hw_mtu) /* fallback to port oper mtu */
mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1);
- *mtu = MLX5E_HW2SW_MTU(hw_mtu);
+ *mtu = MLX5E_HW2SW_MTU(priv, hw_mtu);
}
static int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv)
@@ -2596,9 +2629,10 @@ void mlx5e_switch_priv_channels(struct mlx5e_priv *priv,
{
struct net_device *netdev = priv->netdev;
int new_num_txqs;
-
+ int carrier_ok;
new_num_txqs = new_chs->num * new_chs->params.num_tc;
+ carrier_ok = netif_carrier_ok(netdev);
netif_carrier_off(netdev);
if (new_num_txqs < netdev->real_num_tx_queues)
@@ -2616,7 +2650,9 @@ void mlx5e_switch_priv_channels(struct mlx5e_priv *priv,
mlx5e_refresh_tirs(priv, false);
mlx5e_activate_priv_channels(priv);
- mlx5e_update_carrier(priv);
+ /* restore the carrier state if it was up before the switch */
+ if (carrier_ok)
+ netif_carrier_on(netdev);
}
int mlx5e_open_locked(struct net_device *netdev)
@@ -2632,7 +2668,8 @@ int mlx5e_open_locked(struct net_device *netdev)
mlx5e_refresh_tirs(priv, false);
mlx5e_activate_priv_channels(priv);
- mlx5e_update_carrier(priv);
+ if (priv->profile->update_carrier)
+ priv->profile->update_carrier(priv);
mlx5e_timestamp_init(priv);
if (priv->profile->update_stats)
@@ -2850,7 +2887,7 @@ int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv)
int tt;
inlen = MLX5_ST_SZ_BYTES(create_tir_in);
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -2889,7 +2926,7 @@ int mlx5e_create_direct_tirs(struct mlx5e_priv *priv)
int ix;
inlen = MLX5_ST_SZ_BYTES(create_tir_in);
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -2992,13 +3029,17 @@ out:
}
static int mlx5e_ndo_setup_tc(struct net_device *dev, u32 handle,
- __be16 proto, struct tc_to_netdev *tc)
+ u32 chain_index, __be16 proto,
+ struct tc_to_netdev *tc)
{
struct mlx5e_priv *priv = netdev_priv(dev);
if (TC_H_MAJ(handle) != TC_H_MAJ(TC_H_INGRESS))
goto mqprio;
+ if (chain_index)
+ return -EOPNOTSUPP;
+
switch (tc->type) {
case TC_SETUP_CLSFLOWER:
switch (tc->cls_flower->command) {
@@ -3053,8 +3094,6 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
PPORT_802_3_GET(pstats, a_frame_check_sequence_errors);
stats->rx_frame_errors = PPORT_802_3_GET(pstats, a_alignment_errors);
stats->tx_aborted_errors = PPORT_2863_GET(pstats, if_out_discards);
- stats->tx_carrier_errors =
- PPORT_802_3_GET(pstats, a_symbol_error_during_carrier);
stats->rx_errors = stats->rx_length_errors + stats->rx_crc_errors +
stats->rx_frame_errors;
stats->tx_errors = stats->tx_aborted_errors + stats->tx_carrier_errors;
@@ -3064,7 +3103,6 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
*/
stats->multicast =
VPORT_COUNTER_GET(vstats, received_eth_multicast.packets);
-
}
static void mlx5e_set_rx_mode(struct net_device *dev)
@@ -3307,11 +3345,13 @@ out:
static int mlx5e_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
switch (cmd) {
case SIOCSHWTSTAMP:
- return mlx5e_hwstamp_set(dev, ifr);
+ return mlx5e_hwstamp_set(priv, ifr);
case SIOCGHWTSTAMP:
- return mlx5e_hwstamp_get(dev, ifr);
+ return mlx5e_hwstamp_get(priv, ifr);
default:
return -EOPNOTSUPP;
}
@@ -3490,6 +3530,11 @@ static netdev_features_t mlx5e_features_check(struct sk_buff *skb,
features = vlan_features_check(skb, features);
features = vxlan_features_check(skb, features);
+#ifdef CONFIG_MLX5_EN_IPSEC
+ if (mlx5e_ipsec_feature_check(skb, netdev, features))
+ return features;
+#endif
+
/* Validate if the tunneled packet is being offloaded by HW */
if (skb->encapsulation &&
(features & NETIF_F_CSUM_MASK || features & NETIF_F_GSO_MASK))
@@ -3537,6 +3582,12 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
goto unlock;
}
+ if ((netdev->features & NETIF_F_HW_ESP) && prog) {
+ netdev_warn(netdev, "can't set XDP with IPSec offload\n");
+ err = -EINVAL;
+ goto unlock;
+ }
+
was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
/* no need for full reset when exchanging programs */
reset = (!priv->channels.params.xdp_prog || !prog);
@@ -3596,11 +3647,19 @@ unlock:
return err;
}
-static bool mlx5e_xdp_attached(struct net_device *dev)
+static u32 mlx5e_xdp_query(struct net_device *dev)
{
struct mlx5e_priv *priv = netdev_priv(dev);
+ const struct bpf_prog *xdp_prog;
+ u32 prog_id = 0;
- return !!priv->channels.params.xdp_prog;
+ mutex_lock(&priv->state_lock);
+ xdp_prog = priv->channels.params.xdp_prog;
+ if (xdp_prog)
+ prog_id = xdp_prog->aux->id;
+ mutex_unlock(&priv->state_lock);
+
+ return prog_id;
}
static int mlx5e_xdp(struct net_device *dev, struct netdev_xdp *xdp)
@@ -3609,7 +3668,8 @@ static int mlx5e_xdp(struct net_device *dev, struct netdev_xdp *xdp)
case XDP_SETUP_PROG:
return mlx5e_xdp_set(dev, xdp->prog);
case XDP_QUERY_PROG:
- xdp->prog_attached = mlx5e_xdp_attached(dev);
+ xdp->prog_id = mlx5e_xdp_query(dev);
+ xdp->prog_attached = !!xdp->prog_id;
return 0;
default:
return -EINVAL;
@@ -3715,7 +3775,7 @@ static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
if (!MLX5_CAP_ETH(mdev, self_lb_en_modifiable))
mlx5_core_warn(mdev, "Self loop back prevention is not supported\n");
if (!MLX5_CAP_GEN(mdev, cq_moderation))
- mlx5_core_warn(mdev, "CQ modiration is not supported\n");
+ mlx5_core_warn(mdev, "CQ moderation is not supported\n");
return 0;
}
@@ -3848,7 +3908,7 @@ void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
/* set CQE compression */
params->rx_cqe_compress_def = false;
if (MLX5_CAP_GEN(mdev, cqe_compression) &&
- MLX5_CAP_GEN(mdev, vport_group_manager))
+ MLX5_CAP_GEN(mdev, vport_group_manager))
params->rx_cqe_compress_def = cqe_compress_heuristic(link_speed, pci_bw);
MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS, params->rx_cqe_compress_def);
@@ -3857,6 +3917,7 @@ void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
mlx5e_set_rq_params(mdev, params);
/* HW LRO */
+
/* TODO: && MLX5_CAP_ETH(mdev, lro_cap) */
if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
params->lro_en = hw_lro_heuristic(link_speed, pci_bw);
@@ -3897,6 +3958,7 @@ static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
priv->netdev = netdev;
priv->profile = profile;
priv->ppriv = ppriv;
+ priv->hard_mtu = MLX5E_ETH_HARD_MTU;
mlx5e_build_nic_params(mdev, &priv->channels.params, profile->max_nch(mdev));
@@ -4017,6 +4079,8 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
if (MLX5_CAP_GEN(mdev, vport_group_manager))
netdev->switchdev_ops = &mlx5e_switchdev_ops;
#endif
+
+ mlx5e_ipsec_build_netdev(priv);
}
static void mlx5e_create_q_counter(struct mlx5e_priv *priv)
@@ -4045,14 +4109,19 @@ static void mlx5e_nic_init(struct mlx5_core_dev *mdev,
void *ppriv)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
+ int err;
mlx5e_build_nic_netdev_priv(mdev, netdev, profile, ppriv);
+ err = mlx5e_ipsec_init(priv);
+ if (err)
+ mlx5_core_err(mdev, "IPSec initialization failed, %d\n", err);
mlx5e_build_nic_netdev(netdev);
mlx5e_vxlan_init(priv);
}
static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
{
+ mlx5e_ipsec_cleanup(priv);
mlx5e_vxlan_cleanup(priv);
if (priv->channels.params.xdp_prog)
@@ -4142,7 +4211,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
/* MTU range: 68 - hw-specific max */
netdev->min_mtu = ETH_MIN_MTU;
mlx5_query_port_max_mtu(priv->mdev, &max_mtu, 1);
- netdev->max_mtu = MLX5E_HW2SW_MTU(max_mtu);
+ netdev->max_mtu = MLX5E_HW2SW_MTU(priv, max_mtu);
mlx5e_set_dev_port_mtu(priv);
mlx5_lag_add(mdev, netdev);
@@ -4199,8 +4268,9 @@ static const struct mlx5e_profile mlx5e_nic_profile = {
.cleanup_tx = mlx5e_cleanup_nic_tx,
.enable = mlx5e_nic_enable,
.disable = mlx5e_nic_disable,
- .update_stats = mlx5e_update_stats,
+ .update_stats = mlx5e_update_ndo_stats,
.max_nch = mlx5e_get_max_num_channels,
+ .update_carrier = mlx5e_update_carrier,
.rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe,
.rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
.max_tc = MLX5E_MAX_NUM_TC,
@@ -4443,6 +4513,7 @@ static struct mlx5_interface mlx5e_interface = {
void mlx5e_init(void)
{
+ mlx5e_ipsec_build_inverse_table();
mlx5e_build_ptys2ethtool_map();
mlx5_register_interface(&mlx5e_interface);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 46984a52a94b..45e60be9c277 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -652,7 +652,8 @@ static int mlx5e_rep_get_phys_port_name(struct net_device *dev,
}
static int mlx5e_rep_ndo_setup_tc(struct net_device *dev, u32 handle,
- __be16 proto, struct tc_to_netdev *tc)
+ u32 chain_index, __be16 proto,
+ struct tc_to_netdev *tc)
{
struct mlx5e_priv *priv = netdev_priv(dev);
@@ -664,9 +665,13 @@ static int mlx5e_rep_ndo_setup_tc(struct net_device *dev, u32 handle,
struct net_device *uplink_dev = mlx5_eswitch_get_uplink_netdev(esw);
return uplink_dev->netdev_ops->ndo_setup_tc(uplink_dev, handle,
+ chain_index,
proto, tc);
}
+ if (chain_index)
+ return -EOPNOTSUPP;
+
switch (tc->type) {
case TC_SETUP_CLSFLOWER:
switch (tc->cls_flower->command) {
@@ -830,6 +835,9 @@ static void mlx5e_init_rep(struct mlx5_core_dev *mdev,
INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
priv->channels.params.num_channels = profile->max_nch(mdev);
+
+ priv->hard_mtu = MLX5E_ETH_HARD_MTU;
+
mlx5e_build_rep_params(mdev, &priv->channels.params);
mlx5e_build_rep_netdev(netdev);
}
@@ -913,6 +921,7 @@ static struct mlx5e_profile mlx5e_rep_profile = {
.cleanup_tx = mlx5e_cleanup_nic_tx,
.update_stats = mlx5e_rep_update_stats,
.max_nch = mlx5e_get_rep_max_num_channels,
+ .update_carrier = NULL,
.rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe_rep,
.rx_handlers.handle_rx_cqe_mpwqe = NULL /* Not supported */,
.max_tc = 1,
@@ -1016,7 +1025,6 @@ err_destroy_netdev:
mlx5e_destroy_netdev(netdev_priv(netdev));
kfree(rpriv);
return err;
-
}
static void
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 66b5fec15313..325b2c8c1c6d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -40,7 +40,8 @@
#include "en_tc.h"
#include "eswitch.h"
#include "en_rep.h"
-#include "ipoib.h"
+#include "ipoib/ipoib.h"
+#include "en_accel/ipsec_rxtx.h"
static inline bool mlx5e_rx_hw_stamp(struct mlx5e_tstamp *tstamp)
{
@@ -160,6 +161,11 @@ static inline u32 mlx5e_decompress_cqes_start(struct mlx5e_rq *rq,
#define RQ_PAGE_SIZE(rq) ((1 << rq->buff.page_order) << PAGE_SHIFT)
+static inline bool mlx5e_page_is_reserved(struct page *page)
+{
+ return page_is_pfmemalloc(page) || page_to_nid(page) != numa_node_id();
+}
+
static inline bool mlx5e_rx_cache_put(struct mlx5e_rq *rq,
struct mlx5e_dma_info *dma_info)
{
@@ -238,22 +244,54 @@ void mlx5e_page_release(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info,
put_page(dma_info->page);
}
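+/* A page can be reused for the next WQE only when page-reuse is enabled,
+ * a mapped page exists, the next fragment still fits in it, and the page
+ * is neither pfmemalloc-reserved nor remote to the current NUMA node.
+ */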
+static inline bool mlx5e_page_reuse(struct mlx5e_rq *rq,
+ struct mlx5e_wqe_frag_info *wi)
+{
+ return rq->wqe.page_reuse && wi->di.page &&
+ (wi->offset + rq->wqe.frag_sz <= RQ_PAGE_SIZE(rq)) &&
+ !mlx5e_page_is_reserved(wi->di.page);
+}
+
int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix)
{
- struct mlx5e_dma_info *di = &rq->dma_info[ix];
+ struct mlx5e_wqe_frag_info *wi = &rq->wqe.frag_info[ix];
- if (unlikely(mlx5e_page_alloc_mapped(rq, di)))
- return -ENOMEM;
+ /* if a page is already mapped for this WQE, it can be reused */
+ if (!wi->di.page) {
+ if (unlikely(mlx5e_page_alloc_mapped(rq, &wi->di)))
+ return -ENOMEM;
+ wi->offset = 0;
+ }
- wqe->data.addr = cpu_to_be64(di->addr + rq->rx_headroom);
+ wqe->data.addr = cpu_to_be64(wi->di.addr + wi->offset +
+ rq->rx_headroom);
return 0;
}
+static inline void mlx5e_free_rx_wqe(struct mlx5e_rq *rq,
+ struct mlx5e_wqe_frag_info *wi)
+{
+ mlx5e_page_release(rq, &wi->di, true);
+ wi->di.page = NULL;
+}
+
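+/* Keep the page mapped for reuse when possible; otherwise release it. */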
+static inline void mlx5e_free_rx_wqe_reuse(struct mlx5e_rq *rq,
+ struct mlx5e_wqe_frag_info *wi)
+{
+ if (mlx5e_page_reuse(rq, wi)) {
+ rq->stats.page_reuse++;
+ return;
+ }
+
+ mlx5e_free_rx_wqe(rq, wi);
+}
+
void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix)
{
- struct mlx5e_dma_info *di = &rq->dma_info[ix];
+ struct mlx5e_wqe_frag_info *wi = &rq->wqe.frag_info[ix];
- mlx5e_page_release(rq, di, true);
+ if (wi->di.page)
+ mlx5e_free_rx_wqe(rq, wi);
}
static inline int mlx5e_mpwqe_strides_per_page(struct mlx5e_rq *rq)
@@ -648,9 +686,8 @@ static inline bool mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq,
prefetchw(wqe);
if (unlikely(dma_len < MLX5E_XDP_MIN_INLINE ||
- MLX5E_SW2HW_MTU(rq->netdev->mtu) < dma_len)) {
+ MLX5E_SW2HW_MTU(rq->channel->priv, rq->netdev->mtu) < dma_len)) {
rq->stats.xdp_drop++;
- mlx5e_page_release(rq, di, true);
return false;
}
@@ -661,7 +698,6 @@ static inline bool mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq,
sq->db.doorbell = false;
}
rq->stats.xdp_tx_full++;
- mlx5e_page_release(rq, di, true);
return false;
}
@@ -686,10 +722,15 @@ static inline bool mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq,
cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_SEND);
+ /* move the page reference to the sq's responsibility,
+ * and mark it so it's not put back in the page-cache.
+ */
+ rq->wqe.xdp_xmit = true;
sq->db.di[pi] = *di;
sq->pc++;
sq->db.doorbell = true;
+
rq->stats.xdp_tx++;
return true;
}
@@ -726,35 +767,34 @@ static inline int mlx5e_xdp_handle(struct mlx5e_rq *rq,
trace_xdp_exception(rq->netdev, prog, act);
case XDP_DROP:
rq->stats.xdp_drop++;
- mlx5e_page_release(rq, di, true);
return true;
}
}
static inline
struct sk_buff *skb_from_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
- u16 wqe_counter, u32 cqe_bcnt)
+ struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt)
{
- struct mlx5e_dma_info *di;
+ struct mlx5e_dma_info *di = &wi->di;
struct sk_buff *skb;
void *va, *data;
u16 rx_headroom = rq->rx_headroom;
bool consumed;
+ u32 frag_size;
- di = &rq->dma_info[wqe_counter];
- va = page_address(di->page);
+ va = page_address(di->page) + wi->offset;
data = va + rx_headroom;
+ frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
dma_sync_single_range_for_cpu(rq->pdev,
- di->addr,
- rx_headroom,
- rq->buff.wqe_sz,
+ di->addr + wi->offset,
+ 0, frag_size,
DMA_FROM_DEVICE);
prefetch(data);
+ wi->offset += frag_size;
if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) {
rq->stats.wqe_err++;
- mlx5e_page_release(rq, di, true);
return NULL;
}
@@ -764,16 +804,14 @@ struct sk_buff *skb_from_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
if (consumed)
return NULL; /* page/packet was consumed by XDP */
- skb = build_skb(va, RQ_PAGE_SIZE(rq));
+ skb = build_skb(va, frag_size);
if (unlikely(!skb)) {
rq->stats.buff_alloc_err++;
- mlx5e_page_release(rq, di, true);
return NULL;
}
- /* queue up for recycling ..*/
+ /* queue up for recycling/reuse */
page_ref_inc(di->page);
- mlx5e_page_release(rq, di, true);
skb_reserve(skb, rx_headroom);
skb_put(skb, cqe_bcnt);
@@ -783,6 +821,7 @@ struct sk_buff *skb_from_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
+ struct mlx5e_wqe_frag_info *wi;
struct mlx5e_rx_wqe *wqe;
__be16 wqe_counter_be;
struct sk_buff *skb;
@@ -792,15 +831,27 @@ void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
wqe_counter_be = cqe->wqe_counter;
wqe_counter = be16_to_cpu(wqe_counter_be);
wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter);
+ wi = &rq->wqe.frag_info[wqe_counter];
cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
- skb = skb_from_cqe(rq, cqe, wqe_counter, cqe_bcnt);
- if (!skb)
+ skb = skb_from_cqe(rq, cqe, wi, cqe_bcnt);
+ if (!skb) {
+ /* most likely the packet was consumed by XDP */
+ if (rq->wqe.xdp_xmit) {
+ wi->di.page = NULL;
+ rq->wqe.xdp_xmit = false;
+ /* do not return page to cache, it will be returned on XDP_TX completion */
+ goto wq_ll_pop;
+ }
+ /* likely an XDP_DROP; skip the page-reuse checks */
+ mlx5e_free_rx_wqe(rq, wi);
goto wq_ll_pop;
+ }
mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
napi_gro_receive(rq->cq.napi, skb);
+ mlx5e_free_rx_wqe_reuse(rq, wi);
wq_ll_pop:
mlx5_wq_ll_pop(&rq->wq, wqe_counter_be,
&wqe->next.next_wqe_index);
@@ -812,6 +863,7 @@ void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5e_rep_priv *rpriv = priv->ppriv;
struct mlx5_eswitch_rep *rep = rpriv->rep;
+ struct mlx5e_wqe_frag_info *wi;
struct mlx5e_rx_wqe *wqe;
struct sk_buff *skb;
__be16 wqe_counter_be;
@@ -821,11 +873,21 @@ void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
wqe_counter_be = cqe->wqe_counter;
wqe_counter = be16_to_cpu(wqe_counter_be);
wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter);
+ wi = &rq->wqe.frag_info[wqe_counter];
cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
- skb = skb_from_cqe(rq, cqe, wqe_counter, cqe_bcnt);
- if (!skb)
+ skb = skb_from_cqe(rq, cqe, wi, cqe_bcnt);
+ if (!skb) {
+ if (rq->wqe.xdp_xmit) {
+ wi->di.page = NULL;
+ rq->wqe.xdp_xmit = false;
+ /* do not return the page to the cache; it is released on XDP_TX completion */
+ goto wq_ll_pop;
+ }
+ /* probably an XDP_DROP, skip the page-reuse checks */
+ mlx5e_free_rx_wqe(rq, wi);
goto wq_ll_pop;
+ }
mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
@@ -834,6 +896,7 @@ void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
napi_gro_receive(rq->cq.napi, skb);
+ mlx5e_free_rx_wqe_reuse(rq, wi);
wq_ll_pop:
mlx5_wq_ll_pop(&rq->wq, wqe_counter_be,
&wqe->next.next_wqe_index);
@@ -934,7 +997,7 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
work_done += mlx5e_decompress_cqes_cont(rq, cq, 0, budget);
for (; work_done < budget; work_done++) {
- struct mlx5_cqe64 *cqe = mlx5e_get_cqe(cq);
+ struct mlx5_cqe64 *cqe = mlx5_cqwq_get_cqe(&cq->wq);
if (!cqe)
break;
@@ -988,7 +1051,7 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq)
u16 wqe_counter;
bool last_wqe;
- cqe = mlx5e_get_cqe(cq);
+ cqe = mlx5_cqwq_get_cqe(&cq->wq);
if (!cqe)
break;
@@ -1038,11 +1101,7 @@ void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq)
#ifdef CONFIG_MLX5_CORE_IPOIB
#define MLX5_IB_GRH_DGID_OFFSET 24
-#define MLX5_IB_GRH_BYTES 40
-#define MLX5_IPOIB_ENCAP_LEN 4
#define MLX5_GID_SIZE 16
-#define MLX5_IPOIB_PSEUDO_LEN 20
-#define MLX5_IPOIB_HARD_LEN (MLX5_IPOIB_PSEUDO_LEN + MLX5_IPOIB_ENCAP_LEN)
static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
struct mlx5_cqe64 *cqe,
@@ -1050,6 +1109,7 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
struct sk_buff *skb)
{
struct net_device *netdev = rq->netdev;
+ struct mlx5e_tstamp *tstamp = rq->tstamp;
char *pseudo_header;
u8 *dgid;
u8 g;
@@ -1074,6 +1134,9 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
skb->ip_summed = CHECKSUM_COMPLETE;
skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
+ if (unlikely(mlx5e_rx_hw_stamp(tstamp)))
+ mlx5e_fill_hwstamp(tstamp, get_cqe_ts(cqe), skb_hwtstamps(skb));
+
skb_record_rx_queue(skb, rq->ix);
if (likely(netdev->features & NETIF_F_RXHASH))
@@ -1094,6 +1157,7 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
+ struct mlx5e_wqe_frag_info *wi;
struct mlx5e_rx_wqe *wqe;
__be16 wqe_counter_be;
struct sk_buff *skb;
@@ -1103,18 +1167,60 @@ void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
wqe_counter_be = cqe->wqe_counter;
wqe_counter = be16_to_cpu(wqe_counter_be);
wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter);
+ wi = &rq->wqe.frag_info[wqe_counter];
cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
- skb = skb_from_cqe(rq, cqe, wqe_counter, cqe_bcnt);
+ skb = skb_from_cqe(rq, cqe, wi, cqe_bcnt);
if (!skb)
- goto wq_ll_pop;
+ goto wq_free_wqe;
mlx5i_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
napi_gro_receive(rq->cq.napi, skb);
-wq_ll_pop:
+wq_free_wqe:
+ mlx5e_free_rx_wqe_reuse(rq, wi);
mlx5_wq_ll_pop(&rq->wq, wqe_counter_be,
&wqe->next.next_wqe_index);
}
#endif /* CONFIG_MLX5_CORE_IPOIB */
+
+#ifdef CONFIG_MLX5_EN_IPSEC
+
+void mlx5e_ipsec_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
+{
+ struct mlx5e_wqe_frag_info *wi;
+ struct mlx5e_rx_wqe *wqe;
+ __be16 wqe_counter_be;
+ struct sk_buff *skb;
+ u16 wqe_counter;
+ u32 cqe_bcnt;
+
+ wqe_counter_be = cqe->wqe_counter;
+ wqe_counter = be16_to_cpu(wqe_counter_be);
+ wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter);
+ wi = &rq->wqe.frag_info[wqe_counter];
+ cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
+
+ skb = skb_from_cqe(rq, cqe, wi, cqe_bcnt);
+ if (unlikely(!skb)) {
+ /* a DROP, skip the page-reuse checks */
+ mlx5e_free_rx_wqe(rq, wi);
+ goto wq_ll_pop;
+ }
+ skb = mlx5e_ipsec_handle_rx_skb(rq->netdev, skb);
+ if (unlikely(!skb)) {
+ mlx5e_free_rx_wqe(rq, wi);
+ goto wq_ll_pop;
+ }
+
+ mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
+ napi_gro_receive(rq->cq.napi, skb);
+
+ mlx5e_free_rx_wqe_reuse(rq, wi);
+wq_ll_pop:
+ mlx5_wq_ll_pop(&rq->wq, wqe_counter_be,
+ &wqe->next.next_wqe_index);
+}
+
+#endif /* CONFIG_MLX5_EN_IPSEC */
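The three !skb paths above follow one ownership rule: once XDP_TX has consumed the buffer, the RX side must forget the page so that only the XDP SQ completion releases it; every other no-skb outcome frees (or reuses) the WQE immediately. A minimal standalone C sketch of that hand-off, with all names hypothetical stand-ins for the driver's structures:

    #include <stdbool.h>
    #include <stddef.h>

    struct frag_info { void *page; };
    struct rq_sketch { bool xdp_xmit; };

    /* stands in for mlx5e_free_rx_wqe()/mlx5e_free_rx_wqe_reuse() */
    static void free_or_reuse(struct rq_sketch *rq, struct frag_info *wi)
    {
            (void)rq; (void)wi;
    }

    static void rx_no_skb(struct rq_sketch *rq, struct frag_info *wi)
    {
            if (rq->xdp_xmit) {
                    wi->page = NULL;        /* page now owned by the XDP SQ */
                    rq->xdp_xmit = false;   /* freed on XDP_TX completion   */
                    return;
            }
            free_or_reuse(rq, wi);          /* drop/error: normal free path */
    }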
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
index 5225f2226a67..898759fcf9ec 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
@@ -132,14 +132,14 @@ static struct sk_buff *mlx5e_test_get_udp_skb(struct mlx5e_priv *priv)
skb_reserve(skb, NET_IP_ALIGN);
/* Reserve for ethernet and IP header */
- ethh = (struct ethhdr *)skb_push(skb, ETH_HLEN);
+ ethh = skb_push(skb, ETH_HLEN);
skb_reset_mac_header(skb);
skb_set_network_header(skb, skb->len);
- iph = (struct iphdr *)skb_put(skb, sizeof(struct iphdr));
+ iph = skb_put(skb, sizeof(struct iphdr));
skb_set_transport_header(skb, skb->len);
- udph = (struct udphdr *)skb_put(skb, sizeof(struct udphdr));
+ udph = skb_put(skb, sizeof(struct udphdr));
/* Fill ETH header */
ether_addr_copy(ethh->h_dest, priv->netdev->dev_addr);
@@ -167,12 +167,12 @@ static struct sk_buff *mlx5e_test_get_udp_skb(struct mlx5e_priv *priv)
ip_send_check(iph);
/* Fill test header and data */
- mlxh = (struct mlx5ehdr *)skb_put(skb, sizeof(*mlxh));
+ mlxh = skb_put(skb, sizeof(*mlxh));
mlxh->version = 0;
mlxh->magic = cpu_to_be64(MLX5E_TEST_MAGIC);
strlcpy(mlxh->text, mlx5e_test_text, sizeof(mlxh->text));
datalen -= sizeof(*mlxh);
- memset(skb_put(skb, datalen), 0, datalen);
+ skb_put_zero(skb, datalen);
skb->csum = 0;
skb->ip_summed = CHECKSUM_PARTIAL;
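These en_selftest.c hunks track two tree-wide API changes in this series: skb_push()/skb_put() now return void *, so the casts go away, and memset(skb_put(...), 0, len) collapses into skb_put_zero(). A hedged in-kernel sketch of the resulting style (field values abridged; not the selftest's exact code):

    #include <linux/skbuff.h>
    #include <linux/ip.h>
    #include <linux/if_ether.h>

    static void build_test_headers(struct sk_buff *skb, int datalen)
    {
            struct ethhdr *eth = skb_push(skb, ETH_HLEN);   /* no cast needed */
            struct iphdr *iph  = skb_put(skb, sizeof(*iph));

            eth->h_proto = htons(ETH_P_IP);
            iph->version = 4;
            skb_put_zero(skb, datalen);     /* zeroed payload in one call */
    }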
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index f81c3aa60b46..e65517eafc58 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -79,6 +79,7 @@ struct mlx5e_sw_stats {
u64 rx_buff_alloc_err;
u64 rx_cqe_compress_blks;
u64 rx_cqe_compress_pkts;
+ u64 rx_page_reuse;
u64 rx_cache_reuse;
u64 rx_cache_full;
u64 rx_cache_empty;
@@ -117,6 +118,7 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_page_reuse) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_reuse) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_full) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_empty) },
@@ -268,7 +270,7 @@ static const struct counter_desc pport_2819_stats_desc[] = {
};
static const struct counter_desc pport_phy_statistical_stats_desc[] = {
- { "rx_symbol_errors_phy", PPORT_PHY_STATISTICAL_OFF(phy_symbol_errors) },
+ { "rx_pcs_symbol_err_phy", PPORT_PHY_STATISTICAL_OFF(phy_symbol_errors) },
{ "rx_corrected_bits_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits) },
};
@@ -319,6 +321,7 @@ struct mlx5e_rq_stats {
u64 buff_alloc_err;
u64 cqe_compress_blks;
u64 cqe_compress_pkts;
+ u64 page_reuse;
u64 cache_reuse;
u64 cache_full;
u64 cache_empty;
@@ -341,6 +344,7 @@ static const struct counter_desc rq_stats_desc[] = {
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, page_reuse) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_reuse) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_full) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_empty) },
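The new rx_page_reuse/page_reuse counters slot into the driver's descriptor-table pattern: each entry pairs a printable name with an offsetof() into the stats struct, so generic ethtool code can walk any stats layout. A standalone sketch of the pattern (names hypothetical):

    #include <stddef.h>

    struct sw_stats { unsigned long long rx_page_reuse, rx_cache_reuse; };
    struct counter_desc { const char *name; size_t off; };

    #define DECLARE_STAT(type, fld) { #fld, offsetof(type, fld) }

    static const struct counter_desc descs[] = {
            DECLARE_STAT(struct sw_stats, rx_page_reuse),
            DECLARE_STAT(struct sw_stats, rx_cache_reuse),
    };

    /* generic readout: no per-counter code, just the descriptor table */
    static unsigned long long read_stat(const struct sw_stats *s, int i)
    {
            return *(const unsigned long long *)((const char *)s + descs[i].off);
    }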
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 9df9fc0d26f5..3c536f560dd2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -69,7 +69,8 @@ struct mlx5e_tc_flow {
u64 cookie;
u8 flags;
struct mlx5_flow_handle *rule;
- struct list_head encap; /* flows sharing the same encap */
+ struct list_head encap; /* flows sharing the same encap ID */
+ struct list_head mod_hdr; /* flows sharing the same mod hdr ID */
union {
struct mlx5_esw_flow_attr esw_attr[0];
struct mlx5_nic_flow_attr nic_attr[0];
@@ -90,6 +91,135 @@ enum {
#define MLX5E_TC_TABLE_NUM_ENTRIES 1024
#define MLX5E_TC_TABLE_NUM_GROUPS 4
+struct mod_hdr_key {
+ int num_actions;
+ void *actions;
+};
+
+struct mlx5e_mod_hdr_entry {
+ /* a node of a hash table which keeps all the mod_hdr entries */
+ struct hlist_node mod_hdr_hlist;
+
+ /* flows sharing the same mod_hdr entry */
+ struct list_head flows;
+
+ struct mod_hdr_key key;
+
+ u32 mod_hdr_id;
+};
+
+#define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto)
+
+static inline u32 hash_mod_hdr_info(struct mod_hdr_key *key)
+{
+ return jhash(key->actions,
+ key->num_actions * MLX5_MH_ACT_SZ, 0);
+}
+
+static inline int cmp_mod_hdr_info(struct mod_hdr_key *a,
+ struct mod_hdr_key *b)
+{
+ if (a->num_actions != b->num_actions)
+ return 1;
+
+ return memcmp(a->actions, b->actions, a->num_actions * MLX5_MH_ACT_SZ);
+}
+
+static int mlx5e_attach_mod_hdr(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow,
+ struct mlx5e_tc_flow_parse_attr *parse_attr)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ int num_actions, actions_size, namespace, err;
+ struct mlx5e_mod_hdr_entry *mh;
+ struct mod_hdr_key key;
+ bool found = false;
+ u32 hash_key;
+
+ num_actions = parse_attr->num_mod_hdr_actions;
+ actions_size = MLX5_MH_ACT_SZ * num_actions;
+
+ key.actions = parse_attr->mod_hdr_actions;
+ key.num_actions = num_actions;
+
+ hash_key = hash_mod_hdr_info(&key);
+
+ if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
+ namespace = MLX5_FLOW_NAMESPACE_FDB;
+ hash_for_each_possible(esw->offloads.mod_hdr_tbl, mh,
+ mod_hdr_hlist, hash_key) {
+ if (!cmp_mod_hdr_info(&mh->key, &key)) {
+ found = true;
+ break;
+ }
+ }
+ } else {
+ namespace = MLX5_FLOW_NAMESPACE_KERNEL;
+ hash_for_each_possible(priv->fs.tc.mod_hdr_tbl, mh,
+ mod_hdr_hlist, hash_key) {
+ if (!cmp_mod_hdr_info(&mh->key, &key)) {
+ found = true;
+ break;
+ }
+ }
+ }
+
+ if (found)
+ goto attach_flow;
+
+ mh = kzalloc(sizeof(*mh) + actions_size, GFP_KERNEL);
+ if (!mh)
+ return -ENOMEM;
+
+ mh->key.actions = (void *)mh + sizeof(*mh);
+ memcpy(mh->key.actions, key.actions, actions_size);
+ mh->key.num_actions = num_actions;
+ INIT_LIST_HEAD(&mh->flows);
+
+ err = mlx5_modify_header_alloc(priv->mdev, namespace,
+ mh->key.num_actions,
+ mh->key.actions,
+ &mh->mod_hdr_id);
+ if (err)
+ goto out_err;
+
+ if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
+ hash_add(esw->offloads.mod_hdr_tbl, &mh->mod_hdr_hlist, hash_key);
+ else
+ hash_add(priv->fs.tc.mod_hdr_tbl, &mh->mod_hdr_hlist, hash_key);
+
+attach_flow:
+ list_add(&flow->mod_hdr, &mh->flows);
+ if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
+ flow->esw_attr->mod_hdr_id = mh->mod_hdr_id;
+ else
+ flow->nic_attr->mod_hdr_id = mh->mod_hdr_id;
+
+ return 0;
+
+out_err:
+ kfree(mh);
+ return err;
+}
+
+static void mlx5e_detach_mod_hdr(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow)
+{
+ struct list_head *next = flow->mod_hdr.next;
+
+ list_del(&flow->mod_hdr);
+
+ if (list_empty(next)) {
+ struct mlx5e_mod_hdr_entry *mh;
+
+ mh = list_entry(next, struct mlx5e_mod_hdr_entry, flows);
+
+ mlx5_modify_header_dealloc(priv->mdev, mh->mod_hdr_id);
+ hash_del(&mh->mod_hdr_hlist);
+ kfree(mh);
+ }
+}
+
static struct mlx5_flow_handle *
mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow_parse_attr *parse_attr,
@@ -121,10 +251,7 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
}
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
- err = mlx5_modify_header_alloc(dev, MLX5_FLOW_NAMESPACE_KERNEL,
- parse_attr->num_mod_hdr_actions,
- parse_attr->mod_hdr_actions,
- &attr->mod_hdr_id);
+ err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
flow_act.modify_id = attr->mod_hdr_id;
kfree(parse_attr->mod_hdr_actions);
if (err) {
@@ -166,8 +293,7 @@ err_add_rule:
}
err_create_ft:
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
- mlx5_modify_header_dealloc(priv->mdev,
- attr->mod_hdr_id);
+ mlx5e_detach_mod_hdr(priv, flow);
err_create_mod_hdr_id:
mlx5_fc_destroy(dev, counter);
@@ -177,6 +303,7 @@ err_create_mod_hdr_id:
static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow)
{
+ struct mlx5_nic_flow_attr *attr = flow->nic_attr;
struct mlx5_fc *counter = NULL;
counter = mlx5_flow_rule_counter(flow->rule);
@@ -188,9 +315,8 @@ static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
priv->fs.tc.t = NULL;
}
- if (flow->nic_attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
- mlx5_modify_header_dealloc(priv->mdev,
- flow->nic_attr->mod_hdr_id);
+ if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
+ mlx5e_detach_mod_hdr(priv, flow);
}
static void mlx5e_detach_encap(struct mlx5e_priv *priv,
@@ -213,10 +339,7 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
}
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
- err = mlx5_modify_header_alloc(priv->mdev, MLX5_FLOW_NAMESPACE_FDB,
- parse_attr->num_mod_hdr_actions,
- parse_attr->mod_hdr_actions,
- &attr->mod_hdr_id);
+ err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
kfree(parse_attr->mod_hdr_actions);
if (err) {
rule = ERR_PTR(err);
@@ -231,9 +354,8 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
return rule;
err_add_rule:
- if (flow->esw_attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
- mlx5_modify_header_dealloc(priv->mdev,
- attr->mod_hdr_id);
+ if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
+ mlx5e_detach_mod_hdr(priv, flow);
err_mod_hdr:
mlx5_eswitch_del_vlan_action(esw, attr);
err_add_vlan:
@@ -250,19 +372,18 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
flow->flags &= ~MLX5E_TC_FLOW_OFFLOADED;
- mlx5_eswitch_del_offloaded_rule(esw, flow->rule, flow->esw_attr);
+ mlx5_eswitch_del_offloaded_rule(esw, flow->rule, attr);
}
- mlx5_eswitch_del_vlan_action(esw, flow->esw_attr);
+ mlx5_eswitch_del_vlan_action(esw, attr);
- if (flow->esw_attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP) {
+ if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP) {
mlx5e_detach_encap(priv, flow);
- kvfree(flow->esw_attr->parse_attr);
+ kvfree(attr->parse_attr);
}
- if (flow->esw_attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
- mlx5_modify_header_dealloc(priv->mdev,
- attr->mod_hdr_id);
+ if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
+ mlx5e_detach_mod_hdr(priv, flow);
}
void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
@@ -581,7 +702,9 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) |
- BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL))) {
+ BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) |
+ BIT(FLOW_DISSECTOR_KEY_TCP) |
+ BIT(FLOW_DISSECTOR_KEY_IP))) {
netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
f->dissector->used_keys);
return -EOPNOTSUPP;
@@ -765,6 +888,34 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
*min_inline = MLX5_INLINE_MODE_IP;
}
+ if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_IP)) {
+ struct flow_dissector_key_ip *key =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_IP,
+ f->key);
+ struct flow_dissector_key_ip *mask =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_IP,
+ f->mask);
+
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn, mask->tos & 0x3);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, key->tos & 0x3);
+
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp, mask->tos >> 2);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, key->tos >> 2);
+
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit, mask->ttl);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit, key->ttl);
+
+ if (mask->ttl &&
+ !MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev,
+ ft_field_support.outer_ipv4_ttl))
+ return -EOPNOTSUPP;
+
+ if (mask->tos || mask->ttl)
+ *min_inline = MLX5_INLINE_MODE_IP;
+ }
+
if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
struct flow_dissector_key_ports *key =
skb_flow_dissector_target(f->dissector,
@@ -808,6 +959,25 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
*min_inline = MLX5_INLINE_MODE_TCP_UDP;
}
+ if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_TCP)) {
+ struct flow_dissector_key_tcp *key =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_TCP,
+ f->key);
+ struct flow_dissector_key_tcp *mask =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_TCP,
+ f->mask);
+
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_flags,
+ ntohs(mask->flags));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
+ ntohs(key->flags));
+
+ if (mask->flags)
+ *min_inline = MLX5_INLINE_MODE_TCP_UDP;
+ }
+
return 0;
}
@@ -888,32 +1058,37 @@ struct mlx5_fields {
u32 offset;
};
+#define OFFLOAD(fw_field, size, field, off) \
+ {MLX5_ACTION_IN_FIELD_OUT_ ## fw_field, size, offsetof(struct pedit_headers, field) + (off)}
+
static struct mlx5_fields fields[] = {
- {MLX5_ACTION_IN_FIELD_OUT_DMAC_47_16, 4, offsetof(struct pedit_headers, eth.h_dest[0])},
- {MLX5_ACTION_IN_FIELD_OUT_DMAC_15_0, 2, offsetof(struct pedit_headers, eth.h_dest[4])},
- {MLX5_ACTION_IN_FIELD_OUT_SMAC_47_16, 4, offsetof(struct pedit_headers, eth.h_source[0])},
- {MLX5_ACTION_IN_FIELD_OUT_SMAC_15_0, 2, offsetof(struct pedit_headers, eth.h_source[4])},
- {MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE, 2, offsetof(struct pedit_headers, eth.h_proto)},
-
- {MLX5_ACTION_IN_FIELD_OUT_IP_TTL, 1, offsetof(struct pedit_headers, ip4.ttl)},
- {MLX5_ACTION_IN_FIELD_OUT_SIPV4, 4, offsetof(struct pedit_headers, ip4.saddr)},
- {MLX5_ACTION_IN_FIELD_OUT_DIPV4, 4, offsetof(struct pedit_headers, ip4.daddr)},
-
- {MLX5_ACTION_IN_FIELD_OUT_SIPV6_127_96, 4, offsetof(struct pedit_headers, ip6.saddr.s6_addr32[0])},
- {MLX5_ACTION_IN_FIELD_OUT_SIPV6_95_64, 4, offsetof(struct pedit_headers, ip6.saddr.s6_addr32[1])},
- {MLX5_ACTION_IN_FIELD_OUT_SIPV6_63_32, 4, offsetof(struct pedit_headers, ip6.saddr.s6_addr32[2])},
- {MLX5_ACTION_IN_FIELD_OUT_SIPV6_31_0, 4, offsetof(struct pedit_headers, ip6.saddr.s6_addr32[3])},
- {MLX5_ACTION_IN_FIELD_OUT_DIPV6_127_96, 4, offsetof(struct pedit_headers, ip6.daddr.s6_addr32[0])},
- {MLX5_ACTION_IN_FIELD_OUT_DIPV6_95_64, 4, offsetof(struct pedit_headers, ip6.daddr.s6_addr32[1])},
- {MLX5_ACTION_IN_FIELD_OUT_DIPV6_63_32, 4, offsetof(struct pedit_headers, ip6.daddr.s6_addr32[2])},
- {MLX5_ACTION_IN_FIELD_OUT_DIPV6_31_0, 4, offsetof(struct pedit_headers, ip6.daddr.s6_addr32[3])},
-
- {MLX5_ACTION_IN_FIELD_OUT_TCP_SPORT, 2, offsetof(struct pedit_headers, tcp.source)},
- {MLX5_ACTION_IN_FIELD_OUT_TCP_DPORT, 2, offsetof(struct pedit_headers, tcp.dest)},
- {MLX5_ACTION_IN_FIELD_OUT_TCP_FLAGS, 1, offsetof(struct pedit_headers, tcp.ack_seq) + 5},
-
- {MLX5_ACTION_IN_FIELD_OUT_UDP_SPORT, 2, offsetof(struct pedit_headers, udp.source)},
- {MLX5_ACTION_IN_FIELD_OUT_UDP_DPORT, 2, offsetof(struct pedit_headers, udp.dest)},
+ OFFLOAD(DMAC_47_16, 4, eth.h_dest[0], 0),
+ OFFLOAD(DMAC_15_0, 2, eth.h_dest[4], 0),
+ OFFLOAD(SMAC_47_16, 4, eth.h_source[0], 0),
+ OFFLOAD(SMAC_15_0, 2, eth.h_source[4], 0),
+ OFFLOAD(ETHERTYPE, 2, eth.h_proto, 0),
+
+ OFFLOAD(IP_TTL, 1, ip4.ttl, 0),
+ OFFLOAD(SIPV4, 4, ip4.saddr, 0),
+ OFFLOAD(DIPV4, 4, ip4.daddr, 0),
+
+ OFFLOAD(SIPV6_127_96, 4, ip6.saddr.s6_addr32[0], 0),
+ OFFLOAD(SIPV6_95_64, 4, ip6.saddr.s6_addr32[1], 0),
+ OFFLOAD(SIPV6_63_32, 4, ip6.saddr.s6_addr32[2], 0),
+ OFFLOAD(SIPV6_31_0, 4, ip6.saddr.s6_addr32[3], 0),
+ OFFLOAD(DIPV6_127_96, 4, ip6.daddr.s6_addr32[0], 0),
+ OFFLOAD(DIPV6_95_64, 4, ip6.daddr.s6_addr32[1], 0),
+ OFFLOAD(DIPV6_63_32, 4, ip6.daddr.s6_addr32[2], 0),
+ OFFLOAD(DIPV6_31_0, 4, ip6.daddr.s6_addr32[3], 0),
+ OFFLOAD(IPV6_HOPLIMIT, 1, ip6.hop_limit, 0),
+
+ OFFLOAD(TCP_SPORT, 2, tcp.source, 0),
+ OFFLOAD(TCP_DPORT, 2, tcp.dest, 0),
+ OFFLOAD(TCP_FLAGS, 1, tcp.ack_seq, 5),
+
+ OFFLOAD(UDP_SPORT, 2, udp.source, 0),
+ OFFLOAD(UDP_DPORT, 2, udp.dest, 0),
};
/* On input attr->num_mod_hdr_actions tells how many HW actions can be parsed at
@@ -925,12 +1100,14 @@ static int offload_pedit_fields(struct pedit_headers *masks,
struct mlx5e_tc_flow_parse_attr *parse_attr)
{
struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals;
- int i, action_size, nactions, max_actions, first, last, first_z;
+ int i, action_size, nactions, max_actions, first, last, next_z;
void *s_masks_p, *a_masks_p, *vals_p;
struct mlx5_fields *f;
u8 cmd, field_bsize;
u32 s_mask, a_mask;
unsigned long mask;
+ __be32 mask_be32;
+ __be16 mask_be16;
void *action;
set_masks = &masks[TCA_PEDIT_KEY_EX_CMD_SET];
@@ -984,11 +1161,19 @@ static int offload_pedit_fields(struct pedit_headers *masks,
field_bsize = f->size * BITS_PER_BYTE;
- first_z = find_first_zero_bit(&mask, field_bsize);
+ if (field_bsize == 32) {
+ mask_be32 = *(__be32 *)&mask;
+ mask = (__force unsigned long)cpu_to_le32(be32_to_cpu(mask_be32));
+ } else if (field_bsize == 16) {
+ mask_be16 = *(__be16 *)&mask;
+ mask = (__force unsigned long)cpu_to_le16(be16_to_cpu(mask_be16));
+ }
+
first = find_first_bit(&mask, field_bsize);
+ next_z = find_next_zero_bit(&mask, field_bsize, first);
last = find_last_bit(&mask, field_bsize);
- if (first > 0 || last != (field_bsize - 1) || first_z < last) {
- printk(KERN_WARNING "mlx5: partial rewrite (mask %lx) is currently not offloaded\n",
+ if (first < next_z && next_z < last) {
+ printk(KERN_WARNING "mlx5: rewrite of non-contiguous sub-fields (mask %lx) isn't offloaded\n",
mask);
return -EOPNOTSUPP;
}
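The reworked check above first byte-swaps network-order masks into host bit order, then accepts exactly one contiguous run of set bits: first is the lowest set bit, next_z the first zero above it, last the highest set bit, and first < next_z < last means there is a hole. A single set_action_in can only write one offset/length window, so masks with holes cannot be offloaded. A standalone sketch of the contiguity test, assuming a nonzero mask, 64-bit unsigned long, and GCC/Clang builtins:

    #include <stdbool.h>

    /* true iff the set bits of mask form one gap-free run */
    static bool mask_is_contiguous(unsigned long mask)
    {
            int first = __builtin_ctzl(mask);       /* lowest set bit  */
            int last  = 63 - __builtin_clzl(mask);  /* highest set bit */

            /* a gap-free run of len bits, shifted down, equals 2^len - 1 */
            return (mask >> first) == (1UL << (last - first)) * 2 - 1;
    }

For example 0x0e (bits 1-3) passes, while 0x0a (bits 1 and 3) fails, hitting the warning path above.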
@@ -997,17 +1182,17 @@ static int offload_pedit_fields(struct pedit_headers *masks,
MLX5_SET(set_action_in, action, field, f->field);
if (cmd == MLX5_ACTION_TYPE_SET) {
- MLX5_SET(set_action_in, action, offset, 0);
+ MLX5_SET(set_action_in, action, offset, first);
/* length is num of bits to be written, zero means length of 32 */
- MLX5_SET(set_action_in, action, length, field_bsize);
+ MLX5_SET(set_action_in, action, length, (last - first + 1));
}
if (field_bsize == 32)
- MLX5_SET(set_action_in, action, data, ntohl(*(__be32 *)vals_p));
+ MLX5_SET(set_action_in, action, data, ntohl(*(__be32 *)vals_p) >> first);
else if (field_bsize == 16)
- MLX5_SET(set_action_in, action, data, ntohs(*(__be16 *)vals_p));
+ MLX5_SET(set_action_in, action, data, ntohs(*(__be16 *)vals_p) >> first);
else if (field_bsize == 8)
- MLX5_SET(set_action_in, action, data, *(u8 *)vals_p);
+ MLX5_SET(set_action_in, action, data, *(u8 *)vals_p >> first);
action += action_size;
nactions++;
@@ -1149,10 +1334,6 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
tcf_exts_to_list(exts, &actions);
list_for_each_entry(a, &actions, list) {
- /* Only support a single action per rule */
- if (attr->action)
- return -EINVAL;
-
if (is_tcf_gact_shot(a)) {
attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
if (MLX5_CAP_FLOWTABLE(priv->mdev,
@@ -1435,8 +1616,8 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
if (!(nud_state & NUD_VALID)) {
neigh_event_send(n, NULL);
- neigh_release(n);
- return -EAGAIN;
+ err = -EAGAIN;
+ goto out;
}
err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
@@ -1541,8 +1722,8 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
if (!(nud_state & NUD_VALID)) {
neigh_event_send(n, NULL);
- neigh_release(n);
- return -EAGAIN;
+ err = -EAGAIN;
+ goto out;
}
err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
@@ -1777,7 +1958,7 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
}
flow = kzalloc(sizeof(*flow) + attr_size, GFP_KERNEL);
- parse_attr = mlx5_vzalloc(sizeof(*parse_attr));
+ parse_attr = kvzalloc(sizeof(*parse_attr), GFP_KERNEL);
if (!parse_attr || !flow) {
err = -ENOMEM;
goto err_free;
@@ -1862,9 +2043,7 @@ int mlx5e_stats_flower(struct mlx5e_priv *priv,
{
struct mlx5e_tc_table *tc = &priv->fs.tc;
struct mlx5e_tc_flow *flow;
- struct tc_action *a;
struct mlx5_fc *counter;
- LIST_HEAD(actions);
u64 bytes;
u64 packets;
u64 lastuse;
@@ -1883,13 +2062,7 @@ int mlx5e_stats_flower(struct mlx5e_priv *priv,
mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
- preempt_disable();
-
- tcf_exts_to_list(f->exts, &actions);
- list_for_each_entry(a, &actions, list)
- tcf_action_stats_update(a, bytes, packets, lastuse);
-
- preempt_enable();
+ tcf_exts_stats_update(f->exts, bytes, packets, lastuse);
return 0;
}
@@ -1905,6 +2078,8 @@ int mlx5e_tc_init(struct mlx5e_priv *priv)
{
struct mlx5e_tc_table *tc = &priv->fs.tc;
+ hash_init(tc->mod_hdr_tbl);
+
tc->ht_params = mlx5e_tc_flow_ht_params;
return rhashtable_init(&tc->ht, &tc->ht_params);
}
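mlx5e_attach_mod_hdr() above is a dedup-by-content cache: hash the raw pedit action blob, reuse an existing entry's firmware modify-header ID on an exact match, and track sharers so that mlx5e_detach_mod_hdr() only deallocates the ID when the last flow leaves. A compact userspace sketch of the attach side (single bucket and a refcount instead of kernel hashtables and flow lists; the HW allocation is stubbed; all names hypothetical):

    #include <stdlib.h>
    #include <string.h>

    struct mh_entry {
            struct mh_entry *next;          /* bucket chain */
            int refcnt;                     /* sharers, freed at zero */
            size_t len;
            unsigned hw_id;
            unsigned char actions[];        /* the action blob is the key */
    };

    static struct mh_entry *bucket;

    static struct mh_entry *mh_attach(const void *acts, size_t len)
    {
            struct mh_entry *e;

            for (e = bucket; e; e = e->next)
                    if (e->len == len && !memcmp(e->actions, acts, len)) {
                            e->refcnt++;    /* share the existing HW ID */
                            return e;
                    }

            e = malloc(sizeof(*e) + len);
            if (!e)
                    return NULL;
            memcpy(e->actions, acts, len);
            e->len = len;
            e->refcnt = 1;
            e->hw_id = 0;                   /* stub for FW allocation */
            e->next = bucket;
            bucket = e;
            return e;
    }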
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index ab3bb026ff9e..aaa0f4ebba9a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -33,7 +33,8 @@
#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include "en.h"
-#include "ipoib.h"
+#include "ipoib/ipoib.h"
+#include "en_accel/ipsec_rxtx.h"
#define MLX5E_SQ_NOPS_ROOM MLX5_SEND_WQE_MAX_WQEBBS
#define MLX5E_SQ_STOP_ROOM (MLX5_SEND_WQE_MAX_WQEBBS +\
@@ -245,7 +246,7 @@ mlx5e_txwqe_build_dsegs(struct mlx5e_txqsq *sq, struct sk_buff *skb,
int fsz = skb_frag_size(frag);
dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
- DMA_TO_DEVICE);
+ DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
return -ENOMEM;
@@ -299,12 +300,9 @@ mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
}
}
-static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb)
+static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
+ struct mlx5e_tx_wqe *wqe, u16 pi)
{
- struct mlx5_wq_cyc *wq = &sq->wq;
-
- u16 pi = sq->pc & wq->sz_m1;
- struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
struct mlx5e_tx_wqe_info *wi = &sq->db.wqe_info[pi];
struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
@@ -319,8 +317,6 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb)
u16 ds_cnt;
u16 ihs;
- memset(wqe, 0, sizeof(*wqe));
-
mlx5e_txwqe_build_eseg_csum(sq, skb, eseg);
if (skb_is_gso(skb)) {
@@ -375,8 +371,21 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5e_txqsq *sq = priv->txq2sq[skb_get_queue_mapping(skb)];
+ struct mlx5_wq_cyc *wq = &sq->wq;
+ u16 pi = sq->pc & wq->sz_m1;
+ struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
+
+ memset(wqe, 0, sizeof(*wqe));
- return mlx5e_sq_xmit(sq, skb);
+#ifdef CONFIG_MLX5_EN_IPSEC
+ if (sq->state & BIT(MLX5E_SQ_STATE_IPSEC)) {
+ skb = mlx5e_ipsec_handle_tx_skb(dev, wqe, skb);
+ if (unlikely(!skb))
+ return NETDEV_TX_OK;
+ }
+#endif
+
+ return mlx5e_sq_xmit(sq, skb, wqe, pi);
}
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
@@ -409,7 +418,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
u16 wqe_counter;
bool last_wqe;
- cqe = mlx5e_get_cqe(cq);
+ cqe = mlx5_cqwq_get_cqe(&cq->wq);
if (!cqe)
break;
@@ -557,11 +566,16 @@ netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
if (skb_is_gso(skb)) {
opcode = MLX5_OPCODE_LSO;
ihs = mlx5e_txwqe_build_eseg_gso(sq, skb, eseg, &num_bytes);
+ sq->stats.packets += skb_shinfo(skb)->gso_segs;
} else {
ihs = mlx5e_calc_min_inline(sq->min_inline_mode, skb);
num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
+ sq->stats.packets++;
}
+ sq->stats.bytes += num_bytes;
+ sq->stats.xmit_more += skb->xmit_more;
+
ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
if (ihs) {
memcpy(eseg->inline_hdr.start, skb_data, ihs);
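Hoisting the pi/wqe computation from mlx5e_sq_xmit() into mlx5e_xmit() lets the IPsec hook inspect and fill the WQE before the generic send path runs. The index math itself is the standard power-of-two ring trick: the producer counter pc runs free and sz_m1 = size - 1 masks it into a slot. A runnable sketch:

    #include <assert.h>

    struct ring { unsigned pc; unsigned sz_m1; };   /* size must be 2^n */

    static unsigned ring_slot(struct ring *r)
    {
            unsigned pi = r->pc & r->sz_m1;         /* wraps with no divide */
            r->pc++;                                /* counter never resets */
            return pi;
    }

    int main(void)
    {
            struct ring r = { .pc = 0, .sz_m1 = 7 };        /* 8 entries */
            for (int i = 0; i < 20; i++)
                    assert(ring_slot(&r) == (unsigned)(i % 8));
            return 0;
    }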
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
index 5ca6714e3e02..92db28a9ed43 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
@@ -32,23 +32,6 @@
#include "en.h"
-struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq)
-{
- struct mlx5_cqwq *wq = &cq->wq;
- u32 ci = mlx5_cqwq_get_ci(wq);
- struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(wq, ci);
- u8 cqe_ownership_bit = cqe->op_own & MLX5_CQE_OWNER_MASK;
- u8 sw_ownership_val = mlx5_cqwq_get_wrap_cnt(wq) & 1;
-
- if (cqe_ownership_bit != sw_ownership_val)
- return NULL;
-
- /* ensure cqe content is read after cqe ownership bit */
- dma_rmb();
-
- return cqe;
-}
-
static inline void mlx5e_poll_ico_single_cqe(struct mlx5e_cq *cq,
struct mlx5e_icosq *sq,
struct mlx5_cqe64 *cqe,
@@ -89,7 +72,7 @@ static void mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
return;
- cqe = mlx5e_get_cqe(cq);
+ cqe = mlx5_cqwq_get_cqe(&cq->wq);
if (likely(!cqe))
return;
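The helper removed above documents the whole CQ polling contract: hardware flips the CQE owner bit on every lap around the ring, a CQE belongs to software only when that bit matches the parity of the software wrap counter, and a dma_rmb() must separate the validity check from reading the CQE body. A condensed standalone model of the check (constants hypothetical):

    #include <stdbool.h>

    #define OWNER_MASK 0x1

    struct cqe { unsigned char op_own; };

    /* ours only when the HW owner bit matches our wrap-count parity;
     * in the driver a dma_rmb() follows before touching the body */
    static bool cqe_is_valid(const struct cqe *cqe, unsigned wrap_cnt)
    {
            return (cqe->op_own & OWNER_MASK) == (wrap_cnt & 1);
    }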
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 33eae5ad2fb0..af51a5d2b912 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -35,6 +35,7 @@
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cmd.h>
#include "mlx5_core.h"
+#include "fpga/core.h"
#ifdef CONFIG_MLX5_CORE_EN
#include "eswitch.h"
#endif
@@ -156,6 +157,10 @@ static const char *eqe_type_str(u8 type)
return "MLX5_EVENT_TYPE_PAGE_FAULT";
case MLX5_EVENT_TYPE_PPS_EVENT:
return "MLX5_EVENT_TYPE_PPS_EVENT";
+ case MLX5_EVENT_TYPE_NIC_VPORT_CHANGE:
+ return "MLX5_EVENT_TYPE_NIC_VPORT_CHANGE";
+ case MLX5_EVENT_TYPE_FPGA_ERROR:
+ return "MLX5_EVENT_TYPE_FPGA_ERROR";
default:
return "Unrecognized event";
}
@@ -186,7 +191,7 @@ static void eq_update_ci(struct mlx5_eq *eq, int arm)
{
__be32 __iomem *addr = eq->doorbell + (arm ? 0 : 2);
u32 val = (eq->cons_index & 0xffffff) | (eq->eqn << 24);
- __raw_writel((__force u32) cpu_to_be32(val), addr);
+ __raw_writel((__force u32)cpu_to_be32(val), addr);
/* We still want ordering, just not swabbing, so add a barrier */
mb();
}
@@ -476,6 +481,11 @@ static irqreturn_t mlx5_eq_int(int irq, void *eq_ptr)
if (dev->event)
dev->event(dev, MLX5_DEV_EVENT_PPS, (unsigned long)eqe);
break;
+
+ case MLX5_EVENT_TYPE_FPGA_ERROR:
+ mlx5_fpga_event(dev, eqe->type, &eqe->data.raw);
+ break;
+
default:
mlx5_core_warn(dev, "Unhandled event 0x%x on EQ 0x%x\n",
eqe->type, eq->eqn);
@@ -548,7 +558,7 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
inlen = MLX5_ST_SZ_BYTES(create_eq_in) +
MLX5_FLD_SZ_BYTES(create_eq_in, pas[0]) * eq->buf.npages;
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in) {
err = -ENOMEM;
goto err_buf;
@@ -667,7 +677,6 @@ int mlx5_eq_init(struct mlx5_core_dev *dev)
return err;
}
-
void mlx5_eq_cleanup(struct mlx5_core_dev *dev)
{
mlx5_eq_debugfs_cleanup(dev);
@@ -679,7 +688,6 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev)
u64 async_event_mask = MLX5_ASYNC_EVENT_MASK;
int err;
-
if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH &&
MLX5_CAP_GEN(dev, vport_group_manager) &&
mlx5_core_is_pf(dev))
@@ -693,6 +701,9 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev)
if (MLX5_CAP_GEN(dev, pps))
async_event_mask |= (1ull << MLX5_EVENT_TYPE_PPS_EVENT);
+ if (MLX5_CAP_GEN(dev, fpga))
+ async_event_mask |= (1ull << MLX5_EVENT_TYPE_FPGA_ERROR);
+
err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD,
MLX5_NUM_CMD_EQE, 1ull << MLX5_EVENT_TYPE_CMD,
"mlx5_cmd_eq", MLX5_EQ_TYPE_ASYNC);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 2e34d95ea776..89bfda419efe 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -248,11 +248,10 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule,
if (rx_rule)
match_header |= MLX5_MATCH_MISC_PARAMETERS;
- spec = mlx5_vzalloc(sizeof(*spec));
- if (!spec) {
- esw_warn(esw->dev, "FDB: Failed to alloc match parameters\n");
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
return NULL;
- }
+
dmac_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
outer_headers.dmac_47_16);
dmac_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
@@ -350,10 +349,9 @@ static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw, int nvports)
return -EOPNOTSUPP;
}
- flow_group_in = mlx5_vzalloc(inlen);
+ flow_group_in = kvzalloc(inlen, GFP_KERNEL);
if (!flow_group_in)
return -ENOMEM;
- memset(flow_group_in, 0, inlen);
table_size = BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
@@ -961,7 +959,7 @@ static int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
return -EOPNOTSUPP;
}
- flow_group_in = mlx5_vzalloc(inlen);
+ flow_group_in = kvzalloc(inlen, GFP_KERNEL);
if (!flow_group_in)
return -ENOMEM;
@@ -1078,7 +1076,7 @@ static int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
return -EOPNOTSUPP;
}
- flow_group_in = mlx5_vzalloc(inlen);
+ flow_group_in = kvzalloc(inlen, GFP_KERNEL);
if (!flow_group_in)
return -ENOMEM;
@@ -1219,7 +1217,6 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
"vport[%d] configure ingress rules failed, illegal mac with spoofchk\n",
vport->vport);
return -EPERM;
-
}
esw_vport_cleanup_ingress_rules(esw, vport);
@@ -1241,11 +1238,9 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
"vport[%d] configure ingress rules, vlan(%d) qos(%d)\n",
vport->vport, vport->info.vlan, vport->info.qos);
- spec = mlx5_vzalloc(sizeof(*spec));
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec) {
err = -ENOMEM;
- esw_warn(esw->dev, "vport[%d] configure ingress rules failed, err(%d)\n",
- vport->vport, err);
goto out;
}
@@ -1322,11 +1317,9 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
"vport[%d] configure egress rules, vlan(%d) qos(%d)\n",
vport->vport, vport->info.vlan, vport->info.qos);
- spec = mlx5_vzalloc(sizeof(*spec));
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec) {
err = -ENOMEM;
- esw_warn(esw->dev, "vport[%d] configure egress rules failed, err(%d)\n",
- vport->vport, err);
goto out;
}
@@ -1775,6 +1768,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
}
hash_init(esw->offloads.encap_tbl);
+ hash_init(esw->offloads.mod_hdr_tbl);
mutex_init(&esw->state_lock);
for (vport_num = 0; vport_num < total_vports; vport_num++) {
@@ -2158,7 +2152,7 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
if (!LEGAL_VPORT(esw, vport))
return -EINVAL;
- out = mlx5_vzalloc(outlen);
+ out = kvzalloc(outlen, GFP_KERNEL);
if (!out)
return -ENOMEM;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index b746f62c8c79..834a33050969 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -207,6 +207,7 @@ struct mlx5_esw_offload {
struct mlx5_flow_group *vport_rx_group;
struct mlx5_eswitch_rep *vport_reps;
DECLARE_HASHTABLE(encap_tbl, 8);
+ DECLARE_HASHTABLE(mod_hdr_tbl, 8);
u8 inline_mode;
u64 num_flows;
u8 encap;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index a53e982a6863..95b64025ce36 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -311,9 +311,8 @@ mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn
struct mlx5_flow_spec *spec;
void *misc;
- spec = mlx5_vzalloc(sizeof(*spec));
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec) {
- esw_warn(esw->dev, "FDB: Failed to alloc match parameters\n");
flow_rule = ERR_PTR(-ENOMEM);
goto out;
}
@@ -401,9 +400,8 @@ static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
struct mlx5_flow_spec *spec;
int err = 0;
- spec = mlx5_vzalloc(sizeof(*spec));
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec) {
- esw_warn(esw->dev, "FDB: Failed to alloc match parameters\n");
err = -ENOMEM;
goto out;
}
@@ -488,7 +486,7 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
u32 *flow_group_in;
esw_debug(esw->dev, "Create offloads FDB Tables\n");
- flow_group_in = mlx5_vzalloc(inlen);
+ flow_group_in = kvzalloc(inlen, GFP_KERNEL);
if (!flow_group_in)
return -ENOMEM;
@@ -631,7 +629,7 @@ static int esw_create_vport_rx_group(struct mlx5_eswitch *esw)
int err = 0;
int nvports = priv->sriov.num_vfs + 2;
- flow_group_in = mlx5_vzalloc(inlen);
+ flow_group_in = kvzalloc(inlen, GFP_KERNEL);
if (!flow_group_in)
return -ENOMEM;
@@ -675,9 +673,8 @@ mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn)
struct mlx5_flow_spec *spec;
void *misc;
- spec = mlx5_vzalloc(sizeof(*spec));
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec) {
- esw_warn(esw->dev, "Failed to alloc match parameters\n");
flow_rule = ERR_PTR(-ENOMEM);
goto out;
}
@@ -694,7 +691,7 @@ mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn)
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
- &flow_act, &dest, 1);
+ &flow_act, &dest, 1);
if (IS_ERR(flow_rule)) {
esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
goto out;
@@ -1100,7 +1097,7 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap)
if (err) {
esw_warn(esw->dev, "Failed re-creating fast FDB table, err %d\n", err);
esw->offloads.encap = !encap;
- (void) esw_create_offloads_fast_fdb_table(esw);
+ (void)esw_create_offloads_fast_fdb_table(esw);
}
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.c
new file mode 100644
index 000000000000..e37453d838db
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.c
@@ -0,0 +1,238 @@
+/*
+ * Copyright (c) 2017, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/mlx5/cmd.h>
+#include <linux/mlx5/driver.h>
+#include <linux/mlx5/device.h>
+
+#include "mlx5_core.h"
+#include "fpga/cmd.h"
+
+#define MLX5_FPGA_ACCESS_REG_SZ (MLX5_ST_SZ_DW(fpga_access_reg) + \
+ MLX5_FPGA_ACCESS_REG_SIZE_MAX)
+
+int mlx5_fpga_access_reg(struct mlx5_core_dev *dev, u8 size, u64 addr,
+ void *buf, bool write)
+{
+ u32 in[MLX5_FPGA_ACCESS_REG_SZ] = {0};
+ u32 out[MLX5_FPGA_ACCESS_REG_SZ];
+ int err;
+
+ if (size & 3)
+ return -EINVAL;
+ if (addr & 3)
+ return -EINVAL;
+ if (size > MLX5_FPGA_ACCESS_REG_SIZE_MAX)
+ return -EINVAL;
+
+ MLX5_SET(fpga_access_reg, in, size, size);
+ MLX5_SET64(fpga_access_reg, in, address, addr);
+ if (write)
+ memcpy(MLX5_ADDR_OF(fpga_access_reg, in, data), buf, size);
+
+ err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out),
+ MLX5_REG_FPGA_ACCESS_REG, 0, write);
+ if (err)
+ return err;
+
+ if (!write)
+ memcpy(buf, MLX5_ADDR_OF(fpga_access_reg, out, data), size);
+
+ return 0;
+}
+
+int mlx5_fpga_caps(struct mlx5_core_dev *dev, u32 *caps)
+{
+ u32 in[MLX5_ST_SZ_DW(fpga_cap)] = {0};
+
+ return mlx5_core_access_reg(dev, in, sizeof(in), caps,
+ MLX5_ST_SZ_BYTES(fpga_cap),
+ MLX5_REG_FPGA_CAP, 0, 0);
+}
+
+int mlx5_fpga_ctrl_op(struct mlx5_core_dev *dev, u8 op)
+{
+ u32 in[MLX5_ST_SZ_DW(fpga_ctrl)] = {0};
+ u32 out[MLX5_ST_SZ_DW(fpga_ctrl)];
+
+ MLX5_SET(fpga_ctrl, in, operation, op);
+
+ return mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out),
+ MLX5_REG_FPGA_CTRL, 0, true);
+}
+
+int mlx5_fpga_sbu_caps(struct mlx5_core_dev *dev, void *caps, int size)
+{
+ unsigned int cap_size = MLX5_CAP_FPGA(dev, sandbox_extended_caps_len);
+ u64 addr = MLX5_CAP64_FPGA(dev, sandbox_extended_caps_addr);
+ unsigned int read;
+ int ret = 0;
+
+ if (cap_size > size) {
+ mlx5_core_warn(dev, "Not enough buffer %u for FPGA SBU caps %u",
+ size, cap_size);
+ return -EINVAL;
+ }
+
+ while (cap_size > 0) {
+ read = min_t(unsigned int, cap_size,
+ MLX5_FPGA_ACCESS_REG_SIZE_MAX);
+
+ ret = mlx5_fpga_access_reg(dev, read, addr, caps, false);
+ if (ret) {
+ mlx5_core_warn(dev, "Error reading FPGA SBU caps %u bytes at address 0x%llx: %d",
+ read, addr, ret);
+ return ret;
+ }
+
+ cap_size -= read;
+ addr += read;
+ caps += read;
+ }
+
+ return ret;
+}
+
+int mlx5_fpga_query(struct mlx5_core_dev *dev, struct mlx5_fpga_query *query)
+{
+ u32 in[MLX5_ST_SZ_DW(fpga_ctrl)] = {0};
+ u32 out[MLX5_ST_SZ_DW(fpga_ctrl)];
+ int err;
+
+ err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out),
+ MLX5_REG_FPGA_CTRL, 0, false);
+ if (err)
+ return err;
+
+ query->status = MLX5_GET(fpga_ctrl, out, status);
+ query->admin_image = MLX5_GET(fpga_ctrl, out, flash_select_admin);
+ query->oper_image = MLX5_GET(fpga_ctrl, out, flash_select_oper);
+ return 0;
+}
+
+int mlx5_fpga_create_qp(struct mlx5_core_dev *dev, void *fpga_qpc,
+ u32 *fpga_qpn)
+{
+ u32 in[MLX5_ST_SZ_DW(fpga_create_qp_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(fpga_create_qp_out)];
+ int ret;
+
+ MLX5_SET(fpga_create_qp_in, in, opcode, MLX5_CMD_OP_FPGA_CREATE_QP);
+ memcpy(MLX5_ADDR_OF(fpga_create_qp_in, in, fpga_qpc), fpga_qpc,
+ MLX5_FLD_SZ_BYTES(fpga_create_qp_in, fpga_qpc));
+
+ ret = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ if (ret)
+ return ret;
+
+ memcpy(fpga_qpc, MLX5_ADDR_OF(fpga_create_qp_out, out, fpga_qpc),
+ MLX5_FLD_SZ_BYTES(fpga_create_qp_out, fpga_qpc));
+ *fpga_qpn = MLX5_GET(fpga_create_qp_out, out, fpga_qpn);
+ return ret;
+}
+
+int mlx5_fpga_modify_qp(struct mlx5_core_dev *dev, u32 fpga_qpn,
+ enum mlx5_fpga_qpc_field_select fields,
+ void *fpga_qpc)
+{
+ u32 in[MLX5_ST_SZ_DW(fpga_modify_qp_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(fpga_modify_qp_out)];
+
+ MLX5_SET(fpga_modify_qp_in, in, opcode, MLX5_CMD_OP_FPGA_MODIFY_QP);
+ MLX5_SET(fpga_modify_qp_in, in, field_select, fields);
+ MLX5_SET(fpga_modify_qp_in, in, fpga_qpn, fpga_qpn);
+ memcpy(MLX5_ADDR_OF(fpga_modify_qp_in, in, fpga_qpc), fpga_qpc,
+ MLX5_FLD_SZ_BYTES(fpga_modify_qp_in, fpga_qpc));
+
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5_fpga_query_qp(struct mlx5_core_dev *dev,
+ u32 fpga_qpn, void *fpga_qpc)
+{
+ u32 in[MLX5_ST_SZ_DW(fpga_query_qp_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(fpga_query_qp_out)];
+ int ret;
+
+ MLX5_SET(fpga_query_qp_in, in, opcode, MLX5_CMD_OP_FPGA_QUERY_QP);
+ MLX5_SET(fpga_query_qp_in, in, fpga_qpn, fpga_qpn);
+
+ ret = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ if (ret)
+ return ret;
+
+ memcpy(fpga_qpc, MLX5_ADDR_OF(fpga_query_qp_out, out, fpga_qpc),
+ MLX5_FLD_SZ_BYTES(fpga_query_qp_out, fpga_qpc));
+ return ret;
+}
+
+int mlx5_fpga_destroy_qp(struct mlx5_core_dev *dev, u32 fpga_qpn)
+{
+ u32 in[MLX5_ST_SZ_DW(fpga_destroy_qp_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(fpga_destroy_qp_out)];
+
+ MLX5_SET(fpga_destroy_qp_in, in, opcode, MLX5_CMD_OP_FPGA_DESTROY_QP);
+ MLX5_SET(fpga_destroy_qp_in, in, fpga_qpn, fpga_qpn);
+
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5_fpga_query_qp_counters(struct mlx5_core_dev *dev, u32 fpga_qpn,
+ bool clear, struct mlx5_fpga_qp_counters *data)
+{
+ u32 in[MLX5_ST_SZ_DW(fpga_query_qp_counters_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(fpga_query_qp_counters_out)];
+ int ret;
+
+ MLX5_SET(fpga_query_qp_counters_in, in, opcode,
+ MLX5_CMD_OP_FPGA_QUERY_QP_COUNTERS);
+ MLX5_SET(fpga_query_qp_counters_in, in, clear, clear);
+ MLX5_SET(fpga_query_qp_counters_in, in, fpga_qpn, fpga_qpn);
+
+ ret = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ if (ret)
+ return ret;
+
+ data->rx_ack_packets = MLX5_GET64(fpga_query_qp_counters_out, out,
+ rx_ack_packets);
+ data->rx_send_packets = MLX5_GET64(fpga_query_qp_counters_out, out,
+ rx_send_packets);
+ data->tx_ack_packets = MLX5_GET64(fpga_query_qp_counters_out, out,
+ tx_ack_packets);
+ data->tx_send_packets = MLX5_GET64(fpga_query_qp_counters_out, out,
+ tx_send_packets);
+ data->rx_total_drop = MLX5_GET64(fpga_query_qp_counters_out, out,
+ rx_total_drop);
+
+ return ret;
+}
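mlx5_fpga_sbu_caps() above reads a capability blob that can exceed one access-register window by looping in MLX5_FPGA_ACCESS_REG_SIZE_MAX chunks, advancing the device address and the destination pointer together and bailing on the first failed window. The shape of that loop as a standalone sketch (read_window() is a hypothetical stub for mlx5_fpga_access_reg(..., false)):

    #include <stddef.h>

    enum { CHUNK_MAX = 64 };        /* hypothetical window limit */

    static int read_window(unsigned long long addr, void *dst, size_t len)
    {
            (void)addr; (void)dst; (void)len;       /* stub */
            return 0;
    }

    static int read_blob(unsigned long long addr, void *dst, size_t total)
    {
            while (total) {
                    size_t n = total < CHUNK_MAX ? total : CHUNK_MAX;
                    int err = read_window(addr, dst, n);

                    if (err)
                            return err;     /* first failure wins */
                    addr  += n;
                    dst    = (char *)dst + n;
                    total -= n;
            }
            return 0;
    }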
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.h
new file mode 100644
index 000000000000..94bdfd47c3f0
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2017, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __MLX5_FPGA_H__
+#define __MLX5_FPGA_H__
+
+#include <linux/mlx5/driver.h>
+
+enum mlx5_fpga_image {
+ MLX5_FPGA_IMAGE_USER = 0,
+ MLX5_FPGA_IMAGE_FACTORY,
+};
+
+enum mlx5_fpga_status {
+ MLX5_FPGA_STATUS_SUCCESS = 0,
+ MLX5_FPGA_STATUS_FAILURE = 1,
+ MLX5_FPGA_STATUS_IN_PROGRESS = 2,
+ MLX5_FPGA_STATUS_NONE = 0xFFFF,
+};
+
+struct mlx5_fpga_query {
+ enum mlx5_fpga_image admin_image;
+ enum mlx5_fpga_image oper_image;
+ enum mlx5_fpga_status status;
+};
+
+enum mlx5_fpga_qpc_field_select {
+ MLX5_FPGA_QPC_STATE = BIT(0),
+};
+
+struct mlx5_fpga_qp_counters {
+ u64 rx_ack_packets;
+ u64 rx_send_packets;
+ u64 tx_ack_packets;
+ u64 tx_send_packets;
+ u64 rx_total_drop;
+};
+
+int mlx5_fpga_caps(struct mlx5_core_dev *dev, u32 *caps);
+int mlx5_fpga_query(struct mlx5_core_dev *dev, struct mlx5_fpga_query *query);
+int mlx5_fpga_ctrl_op(struct mlx5_core_dev *dev, u8 op);
+int mlx5_fpga_access_reg(struct mlx5_core_dev *dev, u8 size, u64 addr,
+ void *buf, bool write);
+int mlx5_fpga_sbu_caps(struct mlx5_core_dev *dev, void *caps, int size);
+
+int mlx5_fpga_create_qp(struct mlx5_core_dev *dev, void *fpga_qpc,
+ u32 *fpga_qpn);
+int mlx5_fpga_modify_qp(struct mlx5_core_dev *dev, u32 fpga_qpn,
+ enum mlx5_fpga_qpc_field_select fields, void *fpga_qpc);
+int mlx5_fpga_query_qp(struct mlx5_core_dev *dev, u32 fpga_qpn, void *fpga_qpc);
+int mlx5_fpga_query_qp_counters(struct mlx5_core_dev *dev, u32 fpga_qpn,
+ bool clear, struct mlx5_fpga_qp_counters *data);
+int mlx5_fpga_destroy_qp(struct mlx5_core_dev *dev, u32 fpga_qpn);
+
+#endif /* __MLX5_FPGA_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
new file mode 100644
index 000000000000..c4392f741c5f
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
@@ -0,0 +1,1042 @@
+/*
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <net/addrconf.h>
+#include <linux/etherdevice.h>
+#include <linux/mlx5/vport.h>
+
+#include "mlx5_core.h"
+#include "lib/mlx5.h"
+#include "fpga/conn.h"
+
+#define MLX5_FPGA_PKEY 0xFFFF
+#define MLX5_FPGA_PKEY_INDEX 0 /* RoCE PKEY 0xFFFF is always at index 0 */
+#define MLX5_FPGA_RECV_SIZE 2048
+#define MLX5_FPGA_PORT_NUM 1
+#define MLX5_FPGA_CQ_BUDGET 64
+
+static int mlx5_fpga_conn_map_buf(struct mlx5_fpga_conn *conn,
+ struct mlx5_fpga_dma_buf *buf)
+{
+ struct device *dma_device;
+ int err = 0;
+
+ if (unlikely(!buf->sg[0].data))
+ goto out;
+
+ dma_device = &conn->fdev->mdev->pdev->dev;
+ buf->sg[0].dma_addr = dma_map_single(dma_device, buf->sg[0].data,
+ buf->sg[0].size, buf->dma_dir);
+ err = dma_mapping_error(dma_device, buf->sg[0].dma_addr);
+ if (unlikely(err)) {
+ mlx5_fpga_warn(conn->fdev, "DMA error on sg 0: %d\n", err);
+ err = -ENOMEM;
+ goto out;
+ }
+
+ if (!buf->sg[1].data)
+ goto out;
+
+ buf->sg[1].dma_addr = dma_map_single(dma_device, buf->sg[1].data,
+ buf->sg[1].size, buf->dma_dir);
+ err = dma_mapping_error(dma_device, buf->sg[1].dma_addr);
+ if (unlikely(err)) {
+ mlx5_fpga_warn(conn->fdev, "DMA error on sg 1: %d\n", err);
+ dma_unmap_single(dma_device, buf->sg[0].dma_addr,
+ buf->sg[0].size, buf->dma_dir);
+ err = -ENOMEM;
+ }
+
+out:
+ return err;
+}
+
+static void mlx5_fpga_conn_unmap_buf(struct mlx5_fpga_conn *conn,
+ struct mlx5_fpga_dma_buf *buf)
+{
+ struct device *dma_device;
+
+ dma_device = &conn->fdev->mdev->pdev->dev;
+ if (buf->sg[1].data)
+ dma_unmap_single(dma_device, buf->sg[1].dma_addr,
+ buf->sg[1].size, buf->dma_dir);
+
+ if (likely(buf->sg[0].data))
+ dma_unmap_single(dma_device, buf->sg[0].dma_addr,
+ buf->sg[0].size, buf->dma_dir);
+}
+
+static int mlx5_fpga_conn_post_recv(struct mlx5_fpga_conn *conn,
+ struct mlx5_fpga_dma_buf *buf)
+{
+ struct mlx5_wqe_data_seg *data;
+ unsigned int ix;
+ int err = 0;
+
+ err = mlx5_fpga_conn_map_buf(conn, buf);
+ if (unlikely(err))
+ goto out;
+
+ if (unlikely(conn->qp.rq.pc - conn->qp.rq.cc >= conn->qp.rq.size)) {
+ mlx5_fpga_conn_unmap_buf(conn, buf);
+ return -EBUSY;
+ }
+
+ ix = conn->qp.rq.pc & (conn->qp.rq.size - 1);
+ data = mlx5_wq_cyc_get_wqe(&conn->qp.wq.rq, ix);
+ data->byte_count = cpu_to_be32(buf->sg[0].size);
+ data->lkey = cpu_to_be32(conn->fdev->conn_res.mkey.key);
+ data->addr = cpu_to_be64(buf->sg[0].dma_addr);
+
+ conn->qp.rq.pc++;
+ conn->qp.rq.bufs[ix] = buf;
+
+ /* Make sure that descriptors are written before doorbell record. */
+ dma_wmb();
+ *conn->qp.wq.rq.db = cpu_to_be32(conn->qp.rq.pc & 0xffff);
+out:
+ return err;
+}
+
+static void mlx5_fpga_conn_notify_hw(struct mlx5_fpga_conn *conn, void *wqe)
+{
+ /* ensure wqe is visible to device before updating doorbell record */
+ dma_wmb();
+ *conn->qp.wq.sq.db = cpu_to_be32(conn->qp.sq.pc);
+ /* Make sure that doorbell record is visible before ringing */
+ wmb();
+ mlx5_write64(wqe, conn->fdev->conn_res.uar->map + MLX5_BF_OFFSET, NULL);
+}
+
+static void mlx5_fpga_conn_post_send(struct mlx5_fpga_conn *conn,
+ struct mlx5_fpga_dma_buf *buf)
+{
+ struct mlx5_wqe_ctrl_seg *ctrl;
+ struct mlx5_wqe_data_seg *data;
+ unsigned int ix, sgi;
+ int size = 1;
+
+ ix = conn->qp.sq.pc & (conn->qp.sq.size - 1);
+
+ ctrl = mlx5_wq_cyc_get_wqe(&conn->qp.wq.sq, ix);
+ data = (void *)(ctrl + 1);
+
+ for (sgi = 0; sgi < ARRAY_SIZE(buf->sg); sgi++) {
+ if (!buf->sg[sgi].data)
+ break;
+ data->byte_count = cpu_to_be32(buf->sg[sgi].size);
+ data->lkey = cpu_to_be32(conn->fdev->conn_res.mkey.key);
+ data->addr = cpu_to_be64(buf->sg[sgi].dma_addr);
+ data++;
+ size++;
+ }
+
+ ctrl->imm = 0;
+ ctrl->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
+ ctrl->opmod_idx_opcode = cpu_to_be32(((conn->qp.sq.pc & 0xffff) << 8) |
+ MLX5_OPCODE_SEND);
+ ctrl->qpn_ds = cpu_to_be32(size | (conn->qp.mqp.qpn << 8));
+
+ conn->qp.sq.pc++;
+ conn->qp.sq.bufs[ix] = buf;
+ mlx5_fpga_conn_notify_hw(conn, ctrl);
+}
+
+int mlx5_fpga_conn_send(struct mlx5_fpga_conn *conn,
+ struct mlx5_fpga_dma_buf *buf)
+{
+ unsigned long flags;
+ int err;
+
+ if (!conn->qp.active)
+ return -ENOTCONN;
+
+ err = mlx5_fpga_conn_map_buf(conn, buf);
+ if (err)
+ return err;
+
+ spin_lock_irqsave(&conn->qp.sq.lock, flags);
+
+ if (conn->qp.sq.pc - conn->qp.sq.cc >= conn->qp.sq.size) {
+ list_add_tail(&buf->list, &conn->qp.sq.backlog);
+ goto out_unlock;
+ }
+
+ mlx5_fpga_conn_post_send(conn, buf);
+
+out_unlock:
+ spin_unlock_irqrestore(&conn->qp.sq.lock, flags);
+ return err;
+}
+
+static int mlx5_fpga_conn_post_recv_buf(struct mlx5_fpga_conn *conn)
+{
+ struct mlx5_fpga_dma_buf *buf;
+ int err;
+
+ buf = kzalloc(sizeof(*buf) + MLX5_FPGA_RECV_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ buf->sg[0].data = (void *)(buf + 1);
+ buf->sg[0].size = MLX5_FPGA_RECV_SIZE;
+ buf->dma_dir = DMA_FROM_DEVICE;
+
+ err = mlx5_fpga_conn_post_recv(conn, buf);
+ if (err)
+ kfree(buf);
+
+ return err;
+}
+
+static int mlx5_fpga_conn_create_mkey(struct mlx5_core_dev *mdev, u32 pdn,
+ struct mlx5_core_mkey *mkey)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
+ void *mkc;
+ u32 *in;
+ int err;
+
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
+ MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_PA);
+ MLX5_SET(mkc, mkc, lw, 1);
+ MLX5_SET(mkc, mkc, lr, 1);
+
+ MLX5_SET(mkc, mkc, pd, pdn);
+ MLX5_SET(mkc, mkc, length64, 1);
+ MLX5_SET(mkc, mkc, qpn, 0xffffff);
+
+ err = mlx5_core_create_mkey(mdev, mkey, in, inlen);
+
+ kvfree(in);
+ return err;
+}
+
+static void mlx5_fpga_conn_rq_cqe(struct mlx5_fpga_conn *conn,
+ struct mlx5_cqe64 *cqe, u8 status)
+{
+ struct mlx5_fpga_dma_buf *buf;
+ int ix, err;
+
+ ix = be16_to_cpu(cqe->wqe_counter) & (conn->qp.rq.size - 1);
+ buf = conn->qp.rq.bufs[ix];
+ conn->qp.rq.bufs[ix] = NULL;
+ if (!status)
+ buf->sg[0].size = be32_to_cpu(cqe->byte_cnt);
+ conn->qp.rq.cc++;
+
+ if (unlikely(status && (status != MLX5_CQE_SYNDROME_WR_FLUSH_ERR)))
+ mlx5_fpga_warn(conn->fdev, "RQ buf %p on FPGA QP %u completion status %d\n",
+ buf, conn->fpga_qpn, status);
+ else
+ mlx5_fpga_dbg(conn->fdev, "RQ buf %p on FPGA QP %u completion status %d\n",
+ buf, conn->fpga_qpn, status);
+
+ mlx5_fpga_conn_unmap_buf(conn, buf);
+
+ if (unlikely(status || !conn->qp.active)) {
+ conn->qp.active = false;
+ kfree(buf);
+ return;
+ }
+
+ mlx5_fpga_dbg(conn->fdev, "Message with %u bytes received successfully\n",
+ buf->sg[0].size);
+ conn->recv_cb(conn->cb_arg, buf);
+
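+ /* The receive callback has returned, so the buffer may be recycled
+ * onto the RQ.
+ */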
+ buf->sg[0].size = MLX5_FPGA_RECV_SIZE;
+ err = mlx5_fpga_conn_post_recv(conn, buf);
+ if (unlikely(err)) {
+ mlx5_fpga_warn(conn->fdev,
+ "Failed to re-post recv buf: %d\n", err);
+ kfree(buf);
+ }
+}
+
+static void mlx5_fpga_conn_sq_cqe(struct mlx5_fpga_conn *conn,
+ struct mlx5_cqe64 *cqe, u8 status)
+{
+ struct mlx5_fpga_dma_buf *buf, *nextbuf;
+ unsigned long flags;
+ int ix;
+
+ spin_lock_irqsave(&conn->qp.sq.lock, flags);
+
+ ix = be16_to_cpu(cqe->wqe_counter) & (conn->qp.sq.size - 1);
+ buf = conn->qp.sq.bufs[ix];
+ conn->qp.sq.bufs[ix] = NULL;
+ conn->qp.sq.cc++;
+
+ /* Handle backlog still under the spinlock to ensure message post order */
+ if (unlikely(!list_empty(&conn->qp.sq.backlog))) {
+ if (likely(conn->qp.active)) {
+ nextbuf = list_first_entry(&conn->qp.sq.backlog,
+ struct mlx5_fpga_dma_buf, list);
+ list_del(&nextbuf->list);
+ mlx5_fpga_conn_post_send(conn, nextbuf);
+ }
+ }
+
+ spin_unlock_irqrestore(&conn->qp.sq.lock, flags);
+
+ if (unlikely(status && (status != MLX5_CQE_SYNDROME_WR_FLUSH_ERR)))
+ mlx5_fpga_warn(conn->fdev, "SQ buf %p on FPGA QP %u completion status %d\n",
+ buf, conn->fpga_qpn, status);
+ else
+ mlx5_fpga_dbg(conn->fdev, "SQ buf %p on FPGA QP %u completion status %d\n",
+ buf, conn->fpga_qpn, status);
+
+ mlx5_fpga_conn_unmap_buf(conn, buf);
+
+ if (likely(buf->complete))
+ buf->complete(conn, conn->fdev, buf, status);
+
+ if (unlikely(status))
+ conn->qp.active = false;
+}
+
+static void mlx5_fpga_conn_handle_cqe(struct mlx5_fpga_conn *conn,
+ struct mlx5_cqe64 *cqe)
+{
+ u8 opcode, status = 0;
+
+ opcode = cqe->op_own >> 4;
+
+ switch (opcode) {
+ case MLX5_CQE_REQ_ERR:
+ status = ((struct mlx5_err_cqe *)cqe)->syndrome;
+ /* Fall through */
+ case MLX5_CQE_REQ:
+ mlx5_fpga_conn_sq_cqe(conn, cqe, status);
+ break;
+
+ case MLX5_CQE_RESP_ERR:
+ status = ((struct mlx5_err_cqe *)cqe)->syndrome;
+ /* Fall through */
+ case MLX5_CQE_RESP_SEND:
+ mlx5_fpga_conn_rq_cqe(conn, cqe, status);
+ break;
+ default:
+ mlx5_fpga_warn(conn->fdev, "Unexpected cqe opcode %u\n",
+ opcode);
+ }
+}
+
+static void mlx5_fpga_conn_arm_cq(struct mlx5_fpga_conn *conn)
+{
+ mlx5_cq_arm(&conn->cq.mcq, MLX5_CQ_DB_REQ_NOT,
+ conn->fdev->conn_res.uar->map, conn->cq.wq.cc);
+}
+
+static void mlx5_fpga_conn_cq_event(struct mlx5_core_cq *mcq,
+ enum mlx5_event event)
+{
+ struct mlx5_fpga_conn *conn;
+
+ conn = container_of(mcq, struct mlx5_fpga_conn, cq.mcq);
+ mlx5_fpga_warn(conn->fdev, "CQ event %u on CQ #%u\n", event, mcq->cqn);
+}
+
+static void mlx5_fpga_conn_event(struct mlx5_core_qp *mqp, int event)
+{
+ struct mlx5_fpga_conn *conn;
+
+ conn = container_of(mqp, struct mlx5_fpga_conn, qp.mqp);
+ mlx5_fpga_warn(conn->fdev, "QP event %u on QP #%u\n", event, mqp->qpn);
+}
+
+static inline void mlx5_fpga_conn_cqes(struct mlx5_fpga_conn *conn,
+ unsigned int budget)
+{
+ struct mlx5_cqe64 *cqe;
+
+ while (budget) {
+ cqe = mlx5_cqwq_get_cqe(&conn->cq.wq);
+ if (!cqe)
+ break;
+
+ budget--;
+ mlx5_cqwq_pop(&conn->cq.wq);
+ mlx5_fpga_conn_handle_cqe(conn, cqe);
+ mlx5_cqwq_update_db_record(&conn->cq.wq);
+ }
+ if (!budget) {
+ tasklet_schedule(&conn->cq.tasklet);
+ return;
+ }
+
+ mlx5_fpga_dbg(conn->fdev, "Re-arming CQ with cc# %u\n", conn->cq.wq.cc);
+ /* ensure cq space is freed before enabling more cqes */
+ wmb();
+ mlx5_fpga_conn_arm_cq(conn);
+}
+
+static void mlx5_fpga_conn_cq_tasklet(unsigned long data)
+{
+ struct mlx5_fpga_conn *conn = (void *)data;
+
+ if (unlikely(!conn->qp.active))
+ return;
+ mlx5_fpga_conn_cqes(conn, MLX5_FPGA_CQ_BUDGET);
+}
+
+static void mlx5_fpga_conn_cq_complete(struct mlx5_core_cq *mcq)
+{
+ struct mlx5_fpga_conn *conn;
+
+ conn = container_of(mcq, struct mlx5_fpga_conn, cq.mcq);
+ if (unlikely(!conn->qp.active))
+ return;
+ mlx5_fpga_conn_cqes(conn, MLX5_FPGA_CQ_BUDGET);
+}
+
+static int mlx5_fpga_conn_create_cq(struct mlx5_fpga_conn *conn, int cq_size)
+{
+ struct mlx5_fpga_device *fdev = conn->fdev;
+ struct mlx5_core_dev *mdev = fdev->mdev;
+ u32 temp_cqc[MLX5_ST_SZ_DW(cqc)] = {0};
+ struct mlx5_wq_param wqp;
+ struct mlx5_cqe64 *cqe;
+ int inlen, err, eqn;
+ unsigned int irqn;
+ void *cqc, *in;
+ __be64 *pas;
+ u32 i;
+
+ cq_size = roundup_pow_of_two(cq_size);
+ MLX5_SET(cqc, temp_cqc, log_cq_size, ilog2(cq_size));
+
+ wqp.buf_numa_node = mdev->priv.numa_node;
+ wqp.db_numa_node = mdev->priv.numa_node;
+
+ err = mlx5_cqwq_create(mdev, &wqp, temp_cqc, &conn->cq.wq,
+ &conn->cq.wq_ctrl);
+ if (err)
+ return err;
+
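+ /* Initialize all CQEs to hardware ownership so that none is mistaken
+ * for a valid completion before the hardware writes it.
+ */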
+ for (i = 0; i < mlx5_cqwq_get_size(&conn->cq.wq); i++) {
+ cqe = mlx5_cqwq_get_wqe(&conn->cq.wq, i);
+ cqe->op_own = MLX5_CQE_INVALID << 4 | MLX5_CQE_OWNER_MASK;
+ }
+
+ inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
+ sizeof(u64) * conn->cq.wq_ctrl.frag_buf.npages;
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in) {
+ err = -ENOMEM;
+ goto err_cqwq;
+ }
+
+ err = mlx5_vector2eqn(mdev, smp_processor_id(), &eqn, &irqn);
+ if (err) {
+ kvfree(in);
+ goto err_cqwq;
+ }
+
+ cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
+ MLX5_SET(cqc, cqc, log_cq_size, ilog2(cq_size));
+ MLX5_SET(cqc, cqc, c_eqn, eqn);
+ MLX5_SET(cqc, cqc, uar_page, fdev->conn_res.uar->index);
+ MLX5_SET(cqc, cqc, log_page_size, conn->cq.wq_ctrl.frag_buf.page_shift -
+ MLX5_ADAPTER_PAGE_SHIFT);
+ MLX5_SET64(cqc, cqc, dbr_addr, conn->cq.wq_ctrl.db.dma);
+
+ pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas);
+ mlx5_fill_page_frag_array(&conn->cq.wq_ctrl.frag_buf, pas);
+
+ err = mlx5_core_create_cq(mdev, &conn->cq.mcq, in, inlen);
+ kvfree(in);
+
+ if (err)
+ goto err_cqwq;
+
+ conn->cq.mcq.cqe_sz = 64;
+ conn->cq.mcq.set_ci_db = conn->cq.wq_ctrl.db.db;
+ conn->cq.mcq.arm_db = conn->cq.wq_ctrl.db.db + 1;
+ *conn->cq.mcq.set_ci_db = 0;
+ *conn->cq.mcq.arm_db = 0;
+ conn->cq.mcq.vector = 0;
+ conn->cq.mcq.comp = mlx5_fpga_conn_cq_complete;
+ conn->cq.mcq.event = mlx5_fpga_conn_cq_event;
+ conn->cq.mcq.irqn = irqn;
+ conn->cq.mcq.uar = fdev->conn_res.uar;
+ tasklet_init(&conn->cq.tasklet, mlx5_fpga_conn_cq_tasklet,
+ (unsigned long)conn);
+
+ mlx5_fpga_dbg(fdev, "Created CQ #0x%x\n", conn->cq.mcq.cqn);
+
+ goto out;
+
+err_cqwq:
+ mlx5_cqwq_destroy(&conn->cq.wq_ctrl);
+out:
+ return err;
+}
+
+static void mlx5_fpga_conn_destroy_cq(struct mlx5_fpga_conn *conn)
+{
+ tasklet_disable(&conn->cq.tasklet);
+ tasklet_kill(&conn->cq.tasklet);
+ mlx5_core_destroy_cq(conn->fdev->mdev, &conn->cq.mcq);
+ mlx5_cqwq_destroy(&conn->cq.wq_ctrl);
+}
+
+static int mlx5_fpga_conn_create_wq(struct mlx5_fpga_conn *conn, void *qpc)
+{
+ struct mlx5_fpga_device *fdev = conn->fdev;
+ struct mlx5_core_dev *mdev = fdev->mdev;
+ struct mlx5_wq_param wqp;
+
+ wqp.buf_numa_node = mdev->priv.numa_node;
+ wqp.db_numa_node = mdev->priv.numa_node;
+
+ return mlx5_wq_qp_create(mdev, &wqp, qpc, &conn->qp.wq,
+ &conn->qp.wq_ctrl);
+}
+
+static int mlx5_fpga_conn_create_qp(struct mlx5_fpga_conn *conn,
+ unsigned int tx_size, unsigned int rx_size)
+{
+ struct mlx5_fpga_device *fdev = conn->fdev;
+ struct mlx5_core_dev *mdev = fdev->mdev;
+ u32 temp_qpc[MLX5_ST_SZ_DW(qpc)] = {0};
+ void *in = NULL, *qpc;
+ int err, inlen;
+
+ conn->qp.rq.pc = 0;
+ conn->qp.rq.cc = 0;
+ conn->qp.rq.size = roundup_pow_of_two(rx_size);
+ conn->qp.sq.pc = 0;
+ conn->qp.sq.cc = 0;
+ conn->qp.sq.size = roundup_pow_of_two(tx_size);
+
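+ /* Each RQ entry is one 16-byte data segment; log_rq_stride is encoded
+ * relative to a 16-byte unit, hence the subtraction of 4.
+ */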
+ MLX5_SET(qpc, temp_qpc, log_rq_stride, ilog2(MLX5_SEND_WQE_DS) - 4);
+ MLX5_SET(qpc, temp_qpc, log_rq_size, ilog2(conn->qp.rq.size));
+ MLX5_SET(qpc, temp_qpc, log_sq_size, ilog2(conn->qp.sq.size));
+ err = mlx5_fpga_conn_create_wq(conn, temp_qpc);
+ if (err)
+ goto out;
+
+ conn->qp.rq.bufs = kvzalloc(sizeof(conn->qp.rq.bufs[0]) *
+ conn->qp.rq.size, GFP_KERNEL);
+ if (!conn->qp.rq.bufs) {
+ err = -ENOMEM;
+ goto err_wq;
+ }
+
+ conn->qp.sq.bufs = kvzalloc(sizeof(conn->qp.sq.bufs[0]) *
+ conn->qp.sq.size, GFP_KERNEL);
+ if (!conn->qp.sq.bufs) {
+ err = -ENOMEM;
+ goto err_rq_bufs;
+ }
+
+ inlen = MLX5_ST_SZ_BYTES(create_qp_in) +
+ MLX5_FLD_SZ_BYTES(create_qp_in, pas[0]) *
+ conn->qp.wq_ctrl.buf.npages;
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in) {
+ err = -ENOMEM;
+ goto err_sq_bufs;
+ }
+
+ qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
+ MLX5_SET(qpc, qpc, uar_page, fdev->conn_res.uar->index);
+ MLX5_SET(qpc, qpc, log_page_size,
+ conn->qp.wq_ctrl.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
+ MLX5_SET(qpc, qpc, fre, 1);
+ MLX5_SET(qpc, qpc, rlky, 1);
+ MLX5_SET(qpc, qpc, st, MLX5_QP_ST_RC);
+ MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
+ MLX5_SET(qpc, qpc, pd, fdev->conn_res.pdn);
+ MLX5_SET(qpc, qpc, log_rq_stride, ilog2(MLX5_SEND_WQE_DS) - 4);
+ MLX5_SET(qpc, qpc, log_rq_size, ilog2(conn->qp.rq.size));
+ MLX5_SET(qpc, qpc, rq_type, MLX5_NON_ZERO_RQ);
+ MLX5_SET(qpc, qpc, log_sq_size, ilog2(conn->qp.sq.size));
+ MLX5_SET(qpc, qpc, cqn_snd, conn->cq.mcq.cqn);
+ MLX5_SET(qpc, qpc, cqn_rcv, conn->cq.mcq.cqn);
+ MLX5_SET64(qpc, qpc, dbr_addr, conn->qp.wq_ctrl.db.dma);
+ if (MLX5_CAP_GEN(mdev, cqe_version) == 1)
+ MLX5_SET(qpc, qpc, user_index, 0xFFFFFF);
+
+ mlx5_fill_page_array(&conn->qp.wq_ctrl.buf,
+ (__be64 *)MLX5_ADDR_OF(create_qp_in, in, pas));
+
+ err = mlx5_core_create_qp(mdev, &conn->qp.mqp, in, inlen);
+ if (err)
+ goto err_sq_bufs;
+
+ conn->qp.mqp.event = mlx5_fpga_conn_event;
+ mlx5_fpga_dbg(fdev, "Created QP #0x%x\n", conn->qp.mqp.qpn);
+
+ goto out;
+
+err_sq_bufs:
+ kvfree(conn->qp.sq.bufs);
+err_rq_bufs:
+ kvfree(conn->qp.rq.bufs);
+err_wq:
+ mlx5_wq_destroy(&conn->qp.wq_ctrl);
+out:
+ kvfree(in);
+ return err;
+}
+
+static void mlx5_fpga_conn_free_recv_bufs(struct mlx5_fpga_conn *conn)
+{
+ int ix;
+
+ for (ix = 0; ix < conn->qp.rq.size; ix++) {
+ if (!conn->qp.rq.bufs[ix])
+ continue;
+ mlx5_fpga_conn_unmap_buf(conn, conn->qp.rq.bufs[ix]);
+ kfree(conn->qp.rq.bufs[ix]);
+ conn->qp.rq.bufs[ix] = NULL;
+ }
+}
+
+static void mlx5_fpga_conn_flush_send_bufs(struct mlx5_fpga_conn *conn)
+{
+ struct mlx5_fpga_dma_buf *buf, *temp;
+ int ix;
+
+ for (ix = 0; ix < conn->qp.sq.size; ix++) {
+ buf = conn->qp.sq.bufs[ix];
+ if (!buf)
+ continue;
+ conn->qp.sq.bufs[ix] = NULL;
+ mlx5_fpga_conn_unmap_buf(conn, buf);
+ if (!buf->complete)
+ continue;
+ buf->complete(conn, conn->fdev, buf, MLX5_CQE_SYNDROME_WR_FLUSH_ERR);
+ }
+ list_for_each_entry_safe(buf, temp, &conn->qp.sq.backlog, list) {
+ mlx5_fpga_conn_unmap_buf(conn, buf);
+ if (!buf->complete)
+ continue;
+ buf->complete(conn, conn->fdev, buf, MLX5_CQE_SYNDROME_WR_FLUSH_ERR);
+ }
+}
+
+static void mlx5_fpga_conn_destroy_qp(struct mlx5_fpga_conn *conn)
+{
+ mlx5_core_destroy_qp(conn->fdev->mdev, &conn->qp.mqp);
+ mlx5_fpga_conn_free_recv_bufs(conn);
+ mlx5_fpga_conn_flush_send_bufs(conn);
+ kvfree(conn->qp.sq.bufs);
+ kvfree(conn->qp.rq.bufs);
+ mlx5_wq_destroy(&conn->qp.wq_ctrl);
+}
+
+static inline int mlx5_fpga_conn_reset_qp(struct mlx5_fpga_conn *conn)
+{
+ struct mlx5_core_dev *mdev = conn->fdev->mdev;
+
+ mlx5_fpga_dbg(conn->fdev, "Modifying QP %u to RST\n", conn->qp.mqp.qpn);
+
+ return mlx5_core_qp_modify(mdev, MLX5_CMD_OP_2RST_QP, 0, NULL,
+ &conn->qp.mqp);
+}
+
+static inline int mlx5_fpga_conn_init_qp(struct mlx5_fpga_conn *conn)
+{
+ struct mlx5_fpga_device *fdev = conn->fdev;
+ struct mlx5_core_dev *mdev = fdev->mdev;
+ u32 *qpc = NULL;
+ int err;
+
+ mlx5_fpga_dbg(conn->fdev, "Modifying QP %u to INIT\n", conn->qp.mqp.qpn);
+
+ qpc = kzalloc(MLX5_ST_SZ_BYTES(qpc), GFP_KERNEL);
+ if (!qpc) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ MLX5_SET(qpc, qpc, st, MLX5_QP_ST_RC);
+ MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
+ MLX5_SET(qpc, qpc, primary_address_path.pkey_index, MLX5_FPGA_PKEY_INDEX);
+ MLX5_SET(qpc, qpc, primary_address_path.port, MLX5_FPGA_PORT_NUM);
+ MLX5_SET(qpc, qpc, pd, conn->fdev->conn_res.pdn);
+ MLX5_SET(qpc, qpc, cqn_snd, conn->cq.mcq.cqn);
+ MLX5_SET(qpc, qpc, cqn_rcv, conn->cq.mcq.cqn);
+ MLX5_SET64(qpc, qpc, dbr_addr, conn->qp.wq_ctrl.db.dma);
+
+ err = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_RST2INIT_QP, 0, qpc,
+ &conn->qp.mqp);
+ if (err) {
+ mlx5_fpga_warn(fdev, "qp_modify RST2INIT failed: %d\n", err);
+ goto out;
+ }
+
+out:
+ kfree(qpc);
+ return err;
+}
+
+static inline int mlx5_fpga_conn_rtr_qp(struct mlx5_fpga_conn *conn)
+{
+ struct mlx5_fpga_device *fdev = conn->fdev;
+ struct mlx5_core_dev *mdev = fdev->mdev;
+ u32 *qpc = NULL;
+ int err;
+
+ mlx5_fpga_dbg(conn->fdev, "QP RTR\n");
+
+ qpc = kzalloc(MLX5_ST_SZ_BYTES(qpc), GFP_KERNEL);
+ if (!qpc) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ MLX5_SET(qpc, qpc, mtu, MLX5_QPC_MTU_1K_BYTES);
+ MLX5_SET(qpc, qpc, log_msg_max, (u8)MLX5_CAP_GEN(mdev, log_max_msg));
+ MLX5_SET(qpc, qpc, remote_qpn, conn->fpga_qpn);
+ MLX5_SET(qpc, qpc, next_rcv_psn,
+ MLX5_GET(fpga_qpc, conn->fpga_qpc, next_send_psn));
+ MLX5_SET(qpc, qpc, primary_address_path.pkey_index, MLX5_FPGA_PKEY_INDEX);
+ MLX5_SET(qpc, qpc, primary_address_path.port, MLX5_FPGA_PORT_NUM);
+ ether_addr_copy(MLX5_ADDR_OF(qpc, qpc, primary_address_path.rmac_47_32),
+ MLX5_ADDR_OF(fpga_qpc, conn->fpga_qpc, fpga_mac_47_32));
+ MLX5_SET(qpc, qpc, primary_address_path.udp_sport,
+ MLX5_CAP_ROCE(mdev, r_roce_min_src_udp_port));
+ MLX5_SET(qpc, qpc, primary_address_path.src_addr_index,
+ conn->qp.sgid_index);
+ MLX5_SET(qpc, qpc, primary_address_path.hop_limit, 0);
+ memcpy(MLX5_ADDR_OF(qpc, qpc, primary_address_path.rgid_rip),
+ MLX5_ADDR_OF(fpga_qpc, conn->fpga_qpc, fpga_ip),
+ MLX5_FLD_SZ_BYTES(qpc, primary_address_path.rgid_rip));
+
+ err = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_INIT2RTR_QP, 0, qpc,
+ &conn->qp.mqp);
+ if (err) {
+ mlx5_fpga_warn(fdev, "qp_modify INIT2RTR failed: %d\n", err);
+ goto out;
+ }
+
+out:
+ kfree(qpc);
+ return err;
+}
+
+static inline int mlx5_fpga_conn_rts_qp(struct mlx5_fpga_conn *conn)
+{
+ struct mlx5_fpga_device *fdev = conn->fdev;
+ struct mlx5_core_dev *mdev = fdev->mdev;
+ u32 *qpc = NULL;
+ u32 opt_mask;
+ int err;
+
+ mlx5_fpga_dbg(conn->fdev, "QP RTS\n");
+
+ qpc = kzalloc(MLX5_ST_SZ_BYTES(qpc), GFP_KERNEL);
+ if (!qpc) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ MLX5_SET(qpc, qpc, log_ack_req_freq, 8);
+ MLX5_SET(qpc, qpc, min_rnr_nak, 0x12);
+ MLX5_SET(qpc, qpc, primary_address_path.ack_timeout, 0x12); /* ~1.07s */
+ MLX5_SET(qpc, qpc, next_send_psn,
+ MLX5_GET(fpga_qpc, conn->fpga_qpc, next_rcv_psn));
+ MLX5_SET(qpc, qpc, retry_count, 7);
+ MLX5_SET(qpc, qpc, rnr_retry, 7); /* Infinite retry if RNR NACK */
+
+ opt_mask = MLX5_QP_OPTPAR_RNR_TIMEOUT;
+ err = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_RTR2RTS_QP, opt_mask, qpc,
+ &conn->qp.mqp);
+ if (err) {
+ mlx5_fpga_warn(fdev, "qp_modify RTR2RTS failed: %d\n", err);
+ goto out;
+ }
+
+out:
+ kfree(qpc);
+ return err;
+}
+
+static int mlx5_fpga_conn_connect(struct mlx5_fpga_conn *conn)
+{
+ struct mlx5_fpga_device *fdev = conn->fdev;
+ int err;
+
+ MLX5_SET(fpga_qpc, conn->fpga_qpc, state, MLX5_FPGA_QPC_STATE_ACTIVE);
+ err = mlx5_fpga_modify_qp(conn->fdev->mdev, conn->fpga_qpn,
+ MLX5_FPGA_QPC_STATE, &conn->fpga_qpc);
+ if (err) {
+ mlx5_fpga_err(fdev, "Failed to activate FPGA RC QP: %d\n", err);
+ goto out;
+ }
+
+ err = mlx5_fpga_conn_reset_qp(conn);
+ if (err) {
+ mlx5_fpga_err(fdev, "Failed to change QP state to reset\n");
+ goto err_fpga_qp;
+ }
+
+ err = mlx5_fpga_conn_init_qp(conn);
+ if (err) {
+ mlx5_fpga_err(fdev, "Failed to modify QP from RESET to INIT\n");
+ goto err_fpga_qp;
+ }
+ conn->qp.active = true;
+
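+ /* Fill the RQ completely; the loop ends when posting fails, typically
+ * because the ring is full or an allocation failed.
+ */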
+ while (!mlx5_fpga_conn_post_recv_buf(conn))
+ ;
+
+ err = mlx5_fpga_conn_rtr_qp(conn);
+ if (err) {
+ mlx5_fpga_err(fdev, "Failed to change QP state from INIT to RTR\n");
+ goto err_recv_bufs;
+ }
+
+ err = mlx5_fpga_conn_rts_qp(conn);
+ if (err) {
+ mlx5_fpga_err(fdev, "Failed to change QP state from RTR to RTS\n");
+ goto err_recv_bufs;
+ }
+ goto out;
+
+err_recv_bufs:
+ mlx5_fpga_conn_free_recv_bufs(conn);
+err_fpga_qp:
+ MLX5_SET(fpga_qpc, conn->fpga_qpc, state, MLX5_FPGA_QPC_STATE_INIT);
+ if (mlx5_fpga_modify_qp(conn->fdev->mdev, conn->fpga_qpn,
+ MLX5_FPGA_QPC_STATE, &conn->fpga_qpc))
+ mlx5_fpga_err(fdev, "Failed to revert FPGA QP to INIT\n");
+out:
+ return err;
+}
+
+struct mlx5_fpga_conn *mlx5_fpga_conn_create(struct mlx5_fpga_device *fdev,
+ struct mlx5_fpga_conn_attr *attr,
+ enum mlx5_ifc_fpga_qp_type qp_type)
+{
+ struct mlx5_fpga_conn *ret, *conn;
+ u8 *remote_mac, *remote_ip;
+ int err;
+
+ if (!attr->recv_cb)
+ return ERR_PTR(-EINVAL);
+
+ conn = kzalloc(sizeof(*conn), GFP_KERNEL);
+ if (!conn)
+ return ERR_PTR(-ENOMEM);
+
+ conn->fdev = fdev;
+ INIT_LIST_HEAD(&conn->qp.sq.backlog);
+
+ spin_lock_init(&conn->qp.sq.lock);
+
+ conn->recv_cb = attr->recv_cb;
+ conn->cb_arg = attr->cb_arg;
+
+ remote_mac = MLX5_ADDR_OF(fpga_qpc, conn->fpga_qpc, remote_mac_47_32);
+ err = mlx5_query_nic_vport_mac_address(fdev->mdev, 0, remote_mac);
+ if (err) {
+ mlx5_fpga_err(fdev, "Failed to query local MAC: %d\n", err);
+ ret = ERR_PTR(err);
+ goto err;
+ }
+
+ /* Build Modified EUI-64 IPv6 address from the MAC address */
+ remote_ip = MLX5_ADDR_OF(fpga_qpc, conn->fpga_qpc, remote_ip);
+ remote_ip[0] = 0xfe;
+ remote_ip[1] = 0x80;
+ addrconf_addr_eui48(&remote_ip[8], remote_mac);
+
+ err = mlx5_core_reserved_gid_alloc(fdev->mdev, &conn->qp.sgid_index);
+ if (err) {
+ mlx5_fpga_err(fdev, "Failed to allocate SGID: %d\n", err);
+ ret = ERR_PTR(err);
+ goto err;
+ }
+
+ err = mlx5_core_roce_gid_set(fdev->mdev, conn->qp.sgid_index,
+ MLX5_ROCE_VERSION_2,
+ MLX5_ROCE_L3_TYPE_IPV6,
+ remote_ip, remote_mac, true, 0);
+ if (err) {
+ mlx5_fpga_err(fdev, "Failed to set SGID: %d\n", err);
+ ret = ERR_PTR(err);
+ goto err_rsvd_gid;
+ }
+ mlx5_fpga_dbg(fdev, "Reserved SGID index %u\n", conn->qp.sgid_index);
+
+ /* Allow for one CQE per RX/TX WQE, plus one CQE for the next WQE
+ * posted while processing a CQE.
+ */
+ err = mlx5_fpga_conn_create_cq(conn,
+ (attr->tx_size + attr->rx_size) * 2);
+ if (err) {
+ mlx5_fpga_err(fdev, "Failed to create CQ: %d\n", err);
+ ret = ERR_PTR(err);
+ goto err_gid;
+ }
+
+ mlx5_fpga_conn_arm_cq(conn);
+
+ err = mlx5_fpga_conn_create_qp(conn, attr->tx_size, attr->rx_size);
+ if (err) {
+ mlx5_fpga_err(fdev, "Failed to create QP: %d\n", err);
+ ret = ERR_PTR(err);
+ goto err_cq;
+ }
+
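+ /* The PSNs here pair with the ConnectX QP transitions: RTR takes its
+ * next_rcv_psn from the FPGA's next_send_psn, and RTS takes its
+ * next_send_psn from the FPGA's next_rcv_psn.
+ */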
+ MLX5_SET(fpga_qpc, conn->fpga_qpc, state, MLX5_FPGA_QPC_STATE_INIT);
+ MLX5_SET(fpga_qpc, conn->fpga_qpc, qp_type, qp_type);
+ MLX5_SET(fpga_qpc, conn->fpga_qpc, st, MLX5_FPGA_QPC_ST_RC);
+ MLX5_SET(fpga_qpc, conn->fpga_qpc, ether_type, ETH_P_8021Q);
+ MLX5_SET(fpga_qpc, conn->fpga_qpc, vid, 0);
+ MLX5_SET(fpga_qpc, conn->fpga_qpc, next_rcv_psn, 1);
+ MLX5_SET(fpga_qpc, conn->fpga_qpc, next_send_psn, 0);
+ MLX5_SET(fpga_qpc, conn->fpga_qpc, pkey, MLX5_FPGA_PKEY);
+ MLX5_SET(fpga_qpc, conn->fpga_qpc, remote_qpn, conn->qp.mqp.qpn);
+ MLX5_SET(fpga_qpc, conn->fpga_qpc, rnr_retry, 7);
+ MLX5_SET(fpga_qpc, conn->fpga_qpc, retry_count, 7);
+
+ err = mlx5_fpga_create_qp(fdev->mdev, &conn->fpga_qpc,
+ &conn->fpga_qpn);
+ if (err) {
+ mlx5_fpga_err(fdev, "Failed to create FPGA RC QP: %d\n", err);
+ ret = ERR_PTR(err);
+ goto err_qp;
+ }
+
+ err = mlx5_fpga_conn_connect(conn);
+ if (err) {
+ ret = ERR_PTR(err);
+ goto err_conn;
+ }
+
+ mlx5_fpga_dbg(fdev, "FPGA QPN is %u\n", conn->fpga_qpn);
+ ret = conn;
+ goto out;
+
+err_conn:
+ mlx5_fpga_destroy_qp(conn->fdev->mdev, conn->fpga_qpn);
+err_qp:
+ mlx5_fpga_conn_destroy_qp(conn);
+err_cq:
+ mlx5_fpga_conn_destroy_cq(conn);
+err_gid:
+ mlx5_core_roce_gid_set(fdev->mdev, conn->qp.sgid_index, 0, 0, NULL,
+ NULL, false, 0);
+err_rsvd_gid:
+ mlx5_core_reserved_gid_free(fdev->mdev, conn->qp.sgid_index);
+err:
+ kfree(conn);
+out:
+ return ret;
+}
+
+void mlx5_fpga_conn_destroy(struct mlx5_fpga_conn *conn)
+{
+ struct mlx5_fpga_device *fdev = conn->fdev;
+ struct mlx5_core_dev *mdev = fdev->mdev;
+ int err = 0;
+
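+ /* Stop posting and disable completion processing before tearing the
+ * QP down; moving it to ERR below flushes any outstanding work.
+ */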
+ conn->qp.active = false;
+ tasklet_disable(&conn->cq.tasklet);
+ synchronize_irq(conn->cq.mcq.irqn);
+
+ mlx5_fpga_destroy_qp(mdev, conn->fpga_qpn);
+ err = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_2ERR_QP, 0, NULL,
+ &conn->qp.mqp);
+ if (err)
+ mlx5_fpga_warn(fdev, "qp_modify 2ERR failed: %d\n", err);
+ mlx5_fpga_conn_destroy_qp(conn);
+ mlx5_fpga_conn_destroy_cq(conn);
+
+ mlx5_core_roce_gid_set(mdev, conn->qp.sgid_index, 0, 0,
+ NULL, NULL, false, 0);
+ mlx5_core_reserved_gid_free(mdev, conn->qp.sgid_index);
+ kfree(conn);
+}
+
+int mlx5_fpga_conn_device_init(struct mlx5_fpga_device *fdev)
+{
+ int err;
+
+ err = mlx5_nic_vport_enable_roce(fdev->mdev);
+ if (err) {
+ mlx5_fpga_err(fdev, "Failed to enable RoCE: %d\n", err);
+ goto out;
+ }
+
+ fdev->conn_res.uar = mlx5_get_uars_page(fdev->mdev);
+ if (IS_ERR(fdev->conn_res.uar)) {
+ err = PTR_ERR(fdev->conn_res.uar);
+ mlx5_fpga_err(fdev, "get_uars_page failed, %d\n", err);
+ goto err_roce;
+ }
+ mlx5_fpga_dbg(fdev, "Allocated UAR index %u\n",
+ fdev->conn_res.uar->index);
+
+ err = mlx5_core_alloc_pd(fdev->mdev, &fdev->conn_res.pdn);
+ if (err) {
+ mlx5_fpga_err(fdev, "alloc pd failed, %d\n", err);
+ goto err_uar;
+ }
+ mlx5_fpga_dbg(fdev, "Allocated PD %u\n", fdev->conn_res.pdn);
+
+ err = mlx5_fpga_conn_create_mkey(fdev->mdev, fdev->conn_res.pdn,
+ &fdev->conn_res.mkey);
+ if (err) {
+ mlx5_fpga_err(fdev, "create mkey failed, %d\n", err);
+ goto err_dealloc_pd;
+ }
+ mlx5_fpga_dbg(fdev, "Created mkey 0x%x\n", fdev->conn_res.mkey.key);
+
+ return 0;
+
+err_dealloc_pd:
+ mlx5_core_dealloc_pd(fdev->mdev, fdev->conn_res.pdn);
+err_uar:
+ mlx5_put_uars_page(fdev->mdev, fdev->conn_res.uar);
+err_roce:
+ mlx5_nic_vport_disable_roce(fdev->mdev);
+out:
+ return err;
+}
+
+void mlx5_fpga_conn_device_cleanup(struct mlx5_fpga_device *fdev)
+{
+ mlx5_core_destroy_mkey(fdev->mdev, &fdev->conn_res.mkey);
+ mlx5_core_dealloc_pd(fdev->mdev, fdev->conn_res.pdn);
+ mlx5_put_uars_page(fdev->mdev, fdev->conn_res.uar);
+ mlx5_nic_vport_disable_roce(fdev->mdev);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.h
new file mode 100644
index 000000000000..44bd9eccc711
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.h
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef __MLX5_FPGA_CONN_H__
+#define __MLX5_FPGA_CONN_H__
+
+#include <linux/mlx5/cq.h>
+#include <linux/mlx5/qp.h>
+
+#include "fpga/core.h"
+#include "fpga/sdk.h"
+#include "wq.h"
+
+struct mlx5_fpga_conn {
+ struct mlx5_fpga_device *fdev;
+
+ void (*recv_cb)(void *cb_arg, struct mlx5_fpga_dma_buf *buf);
+ void *cb_arg;
+
+ /* FPGA QP */
+ u32 fpga_qpc[MLX5_ST_SZ_DW(fpga_qpc)];
+ u32 fpga_qpn;
+
+ /* CQ */
+ struct {
+ struct mlx5_cqwq wq;
+ struct mlx5_frag_wq_ctrl wq_ctrl;
+ struct mlx5_core_cq mcq;
+ struct tasklet_struct tasklet;
+ } cq;
+
+ /* QP */
+ struct {
+ bool active;
+ int sgid_index;
+ struct mlx5_wq_qp wq;
+ struct mlx5_wq_ctrl wq_ctrl;
+ struct mlx5_core_qp mqp;
+ struct {
+ spinlock_t lock; /* Protects all SQ state */
+ unsigned int pc;
+ unsigned int cc;
+ unsigned int size;
+ struct mlx5_fpga_dma_buf **bufs;
+ struct list_head backlog;
+ } sq;
+ struct {
+ unsigned int pc;
+ unsigned int cc;
+ unsigned int size;
+ struct mlx5_fpga_dma_buf **bufs;
+ } rq;
+ } qp;
+};
+
+int mlx5_fpga_conn_device_init(struct mlx5_fpga_device *fdev);
+void mlx5_fpga_conn_device_cleanup(struct mlx5_fpga_device *fdev);
+struct mlx5_fpga_conn *
+mlx5_fpga_conn_create(struct mlx5_fpga_device *fdev,
+ struct mlx5_fpga_conn_attr *attr,
+ enum mlx5_ifc_fpga_qp_type qp_type);
+void mlx5_fpga_conn_destroy(struct mlx5_fpga_conn *conn);
+int mlx5_fpga_conn_send(struct mlx5_fpga_conn *conn,
+ struct mlx5_fpga_dma_buf *buf);
+
+#endif /* __MLX5_FPGA_CONN_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c
new file mode 100644
index 000000000000..31e5a2627eb8
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c
@@ -0,0 +1,283 @@
+/*
+ * Copyright (c) 2017, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/etherdevice.h>
+#include <linux/mlx5/driver.h>
+
+#include "mlx5_core.h"
+#include "lib/mlx5.h"
+#include "fpga/core.h"
+#include "fpga/conn.h"
+
+static const char *const mlx5_fpga_error_strings[] = {
+ "Null Syndrome",
+ "Corrupted DDR",
+ "Flash Timeout",
+ "Internal Link Error",
+ "Watchdog HW Failure",
+ "I2C Failure",
+ "Image Changed",
+ "Temperature Critical",
+};
+
+static struct mlx5_fpga_device *mlx5_fpga_device_alloc(void)
+{
+ struct mlx5_fpga_device *fdev = NULL;
+
+ fdev = kzalloc(sizeof(*fdev), GFP_KERNEL);
+ if (!fdev)
+ return NULL;
+
+ spin_lock_init(&fdev->state_lock);
+ fdev->state = MLX5_FPGA_STATUS_NONE;
+ return fdev;
+}
+
+static const char *mlx5_fpga_image_name(enum mlx5_fpga_image image)
+{
+ switch (image) {
+ case MLX5_FPGA_IMAGE_USER:
+ return "user";
+ case MLX5_FPGA_IMAGE_FACTORY:
+ return "factory";
+ default:
+ return "unknown";
+ }
+}
+
+static int mlx5_fpga_device_load_check(struct mlx5_fpga_device *fdev)
+{
+ struct mlx5_fpga_query query;
+ int err;
+
+ err = mlx5_fpga_query(fdev->mdev, &query);
+ if (err) {
+ mlx5_fpga_err(fdev, "Failed to query status: %d\n", err);
+ return err;
+ }
+
+ fdev->last_admin_image = query.admin_image;
+ fdev->last_oper_image = query.oper_image;
+
+ mlx5_fpga_dbg(fdev, "Status %u; Admin image %u; Oper image %u\n",
+ query.status, query.admin_image, query.oper_image);
+
+ if (query.status != MLX5_FPGA_STATUS_SUCCESS) {
+ mlx5_fpga_err(fdev, "%s image failed to load; status %u\n",
+ mlx5_fpga_image_name(fdev->last_oper_image),
+ query.status);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int mlx5_fpga_device_brb(struct mlx5_fpga_device *fdev)
+{
+ struct mlx5_core_dev *mdev = fdev->mdev;
+ int err;
+
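+ /* Reset the SBU while the shell bypass is on, so that no traffic
+ * flows through the sandbox in the middle of the reset.
+ */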
+ err = mlx5_fpga_ctrl_op(mdev, MLX5_FPGA_CTRL_OPERATION_SANDBOX_BYPASS_ON);
+ if (err) {
+ mlx5_fpga_err(fdev, "Failed to set bypass on: %d\n", err);
+ return err;
+ }
+ err = mlx5_fpga_ctrl_op(mdev, MLX5_FPGA_CTRL_OPERATION_RESET_SANDBOX);
+ if (err) {
+ mlx5_fpga_err(fdev, "Failed to reset SBU: %d\n", err);
+ return err;
+ }
+ err = mlx5_fpga_ctrl_op(mdev, MLX5_FPGA_CTRL_OPERATION_SANDBOX_BYPASS_OFF);
+ if (err) {
+ mlx5_fpga_err(fdev, "Failed to set bypass off: %d\n", err);
+ return err;
+ }
+ return 0;
+}
+
+int mlx5_fpga_device_start(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_fpga_device *fdev = mdev->fpga;
+ unsigned long flags;
+ unsigned int max_num_qps;
+ int err;
+
+ if (!fdev)
+ return 0;
+
+ err = mlx5_fpga_device_load_check(fdev);
+ if (err)
+ goto out;
+
+ err = mlx5_fpga_caps(fdev->mdev,
+ fdev->mdev->caps.hca_cur[MLX5_CAP_FPGA]);
+ if (err)
+ goto out;
+
+ mlx5_fpga_info(fdev, "device %u; %s image, version %u\n",
+ MLX5_CAP_FPGA(fdev->mdev, fpga_device),
+ mlx5_fpga_image_name(fdev->last_oper_image),
+ MLX5_CAP_FPGA(fdev->mdev, image_version));
+
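+ /* Reserve a GID for each QP that the FPGA shell may expose */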
+ max_num_qps = MLX5_CAP_FPGA(mdev, shell_caps.max_num_qps);
+ err = mlx5_core_reserve_gids(mdev, max_num_qps);
+ if (err)
+ goto out;
+
+ err = mlx5_fpga_conn_device_init(fdev);
+ if (err)
+ goto err_rsvd_gid;
+
+ if (fdev->last_oper_image == MLX5_FPGA_IMAGE_USER) {
+ err = mlx5_fpga_device_brb(fdev);
+ if (err)
+ goto err_conn_init;
+ }
+
+ goto out;
+
+err_conn_init:
+ mlx5_fpga_conn_device_cleanup(fdev);
+
+err_rsvd_gid:
+ mlx5_core_unreserve_gids(mdev, max_num_qps);
+out:
+ spin_lock_irqsave(&fdev->state_lock, flags);
+ fdev->state = err ? MLX5_FPGA_STATUS_FAILURE : MLX5_FPGA_STATUS_SUCCESS;
+ spin_unlock_irqrestore(&fdev->state_lock, flags);
+ return err;
+}
+
+int mlx5_fpga_init(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_fpga_device *fdev = NULL;
+
+ if (!MLX5_CAP_GEN(mdev, fpga)) {
+ mlx5_core_dbg(mdev, "FPGA capability not present\n");
+ return 0;
+ }
+
+ mlx5_core_dbg(mdev, "Initializing FPGA\n");
+
+ fdev = mlx5_fpga_device_alloc();
+ if (!fdev)
+ return -ENOMEM;
+
+ fdev->mdev = mdev;
+ mdev->fpga = fdev;
+
+ return 0;
+}
+
+void mlx5_fpga_device_stop(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_fpga_device *fdev = mdev->fpga;
+ unsigned int max_num_qps;
+ unsigned long flags;
+ int err;
+
+ if (!fdev)
+ return;
+
+ spin_lock_irqsave(&fdev->state_lock, flags);
+ if (fdev->state != MLX5_FPGA_STATUS_SUCCESS) {
+ spin_unlock_irqrestore(&fdev->state_lock, flags);
+ return;
+ }
+ fdev->state = MLX5_FPGA_STATUS_NONE;
+ spin_unlock_irqrestore(&fdev->state_lock, flags);
+
+ if (fdev->last_oper_image == MLX5_FPGA_IMAGE_USER) {
+ err = mlx5_fpga_ctrl_op(mdev, MLX5_FPGA_CTRL_OPERATION_SANDBOX_BYPASS_ON);
+ if (err)
+ mlx5_fpga_err(fdev, "Failed to re-set SBU bypass on: %d\n",
+ err);
+ }
+
+ mlx5_fpga_conn_device_cleanup(fdev);
+ max_num_qps = MLX5_CAP_FPGA(mdev, shell_caps.max_num_qps);
+ mlx5_core_unreserve_gids(mdev, max_num_qps);
+}
+
+void mlx5_fpga_cleanup(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_fpga_device *fdev = mdev->fpga;
+
+ mlx5_fpga_device_stop(mdev);
+ kfree(fdev);
+ mdev->fpga = NULL;
+}
+
+static const char *mlx5_fpga_syndrome_to_string(u8 syndrome)
+{
+ if (syndrome < ARRAY_SIZE(mlx5_fpga_error_strings))
+ return mlx5_fpga_error_strings[syndrome];
+ return "Unknown";
+}
+
+void mlx5_fpga_event(struct mlx5_core_dev *mdev, u8 event, void *data)
+{
+ struct mlx5_fpga_device *fdev = mdev->fpga;
+ const char *event_name;
+ bool teardown = false;
+ unsigned long flags;
+ u8 syndrome;
+
+ if (event != MLX5_EVENT_TYPE_FPGA_ERROR) {
+ mlx5_fpga_warn_ratelimited(fdev, "Unexpected event %u\n",
+ event);
+ return;
+ }
+
+ syndrome = MLX5_GET(fpga_error_event, data, syndrome);
+ event_name = mlx5_fpga_syndrome_to_string(syndrome);
+
+ spin_lock_irqsave(&fdev->state_lock, flags);
+ switch (fdev->state) {
+ case MLX5_FPGA_STATUS_SUCCESS:
+ mlx5_fpga_warn(fdev, "Error %u: %s\n", syndrome, event_name);
+ teardown = true;
+ break;
+ default:
+ mlx5_fpga_warn_ratelimited(fdev, "Unexpected error event %u: %s\n",
+ syndrome, event_name);
+ }
+ spin_unlock_irqrestore(&fdev->state_lock, flags);
+ /* We tear down the card's interfaces and functionality because
+ * the FPGA bump-on-the-wire is misbehaving and we lose the ability
+ * to communicate with the network. The user may still be able to
+ * recover by re-programming or debugging the FPGA.
+ */
+ if (teardown)
+ mlx5_trigger_health_work(fdev->mdev);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h
new file mode 100644
index 000000000000..82405ed84725
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2017, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __MLX5_FPGA_CORE_H__
+#define __MLX5_FPGA_CORE_H__
+
+#ifdef CONFIG_MLX5_FPGA
+
+#include "fpga/cmd.h"
+
+/* Represents an Innova device */
+struct mlx5_fpga_device {
+ struct mlx5_core_dev *mdev;
+ spinlock_t state_lock; /* Protects state transitions */
+ enum mlx5_fpga_status state;
+ enum mlx5_fpga_image last_admin_image;
+ enum mlx5_fpga_image last_oper_image;
+
+ /* QP Connection resources */
+ struct {
+ u32 pdn;
+ struct mlx5_core_mkey mkey;
+ struct mlx5_uars_page *uar;
+ } conn_res;
+
+ struct mlx5_fpga_ipsec *ipsec;
+};
+
+#define mlx5_fpga_dbg(__adev, format, ...) \
+ dev_dbg(&(__adev)->mdev->pdev->dev, "FPGA: %s:%d:(pid %d): " format, \
+ __func__, __LINE__, current->pid, ##__VA_ARGS__)
+
+#define mlx5_fpga_err(__adev, format, ...) \
+ dev_err(&(__adev)->mdev->pdev->dev, "FPGA: %s:%d:(pid %d): " format, \
+ __func__, __LINE__, current->pid, ##__VA_ARGS__)
+
+#define mlx5_fpga_warn(__adev, format, ...) \
+ dev_warn(&(__adev)->mdev->pdev->dev, "FPGA: %s:%d:(pid %d): " format, \
+ __func__, __LINE__, current->pid, ##__VA_ARGS__)
+
+#define mlx5_fpga_warn_ratelimited(__adev, format, ...) \
+ dev_warn_ratelimited(&(__adev)->mdev->pdev->dev, "FPGA: %s:%d: " \
+ format, __func__, __LINE__, ##__VA_ARGS__)
+
+#define mlx5_fpga_notice(__adev, format, ...) \
+ dev_notice(&(__adev)->mdev->pdev->dev, "FPGA: " format, ##__VA_ARGS__)
+
+#define mlx5_fpga_info(__adev, format, ...) \
+ dev_info(&(__adev)->mdev->pdev->dev, "FPGA: " format, ##__VA_ARGS__)
+
+int mlx5_fpga_init(struct mlx5_core_dev *mdev);
+void mlx5_fpga_cleanup(struct mlx5_core_dev *mdev);
+int mlx5_fpga_device_start(struct mlx5_core_dev *mdev);
+void mlx5_fpga_device_stop(struct mlx5_core_dev *mdev);
+void mlx5_fpga_event(struct mlx5_core_dev *mdev, u8 event, void *data);
+
+#else
+
+static inline int mlx5_fpga_init(struct mlx5_core_dev *mdev)
+{
+ return 0;
+}
+
+static inline void mlx5_fpga_cleanup(struct mlx5_core_dev *mdev)
+{
+}
+
+static inline int mlx5_fpga_device_start(struct mlx5_core_dev *mdev)
+{
+ return 0;
+}
+
+static inline void mlx5_fpga_device_stop(struct mlx5_core_dev *mdev)
+{
+}
+
+static inline void mlx5_fpga_event(struct mlx5_core_dev *mdev, u8 event,
+ void *data)
+{
+}
+
+#endif
+
+#endif /* __MLX5_FPGA_CORE_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
new file mode 100644
index 000000000000..42970e2a05ff
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
@@ -0,0 +1,376 @@
+/*
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/mlx5/driver.h>
+
+#include "mlx5_core.h"
+#include "fpga/ipsec.h"
+#include "fpga/sdk.h"
+#include "fpga/core.h"
+
+#define SBU_QP_QUEUE_SIZE 8
+
+enum mlx5_ipsec_response_syndrome {
+ MLX5_IPSEC_RESPONSE_SUCCESS = 0,
+ MLX5_IPSEC_RESPONSE_ILLEGAL_REQUEST = 1,
+ MLX5_IPSEC_RESPONSE_SADB_ISSUE = 2,
+ MLX5_IPSEC_RESPONSE_WRITE_RESPONSE_ISSUE = 3,
+};
+
+enum mlx5_fpga_ipsec_sacmd_status {
+ MLX5_FPGA_IPSEC_SACMD_PENDING,
+ MLX5_FPGA_IPSEC_SACMD_SEND_FAIL,
+ MLX5_FPGA_IPSEC_SACMD_COMPLETE,
+};
+
+struct mlx5_ipsec_command_context {
+ struct mlx5_fpga_dma_buf buf;
+ struct mlx5_accel_ipsec_sa sa;
+ enum mlx5_fpga_ipsec_sacmd_status status;
+ int status_code;
+ struct completion complete;
+ struct mlx5_fpga_device *dev;
+ struct list_head list; /* Item in pending_cmds */
+};
+
+struct mlx5_ipsec_sadb_resp {
+ __be32 syndrome;
+ __be32 sw_sa_handle;
+ u8 reserved[24];
+} __packed;
+
+struct mlx5_fpga_ipsec {
+ struct list_head pending_cmds;
+ spinlock_t pending_cmds_lock; /* Protects pending_cmds */
+ u32 caps[MLX5_ST_SZ_DW(ipsec_extended_cap)];
+ struct mlx5_fpga_conn *conn;
+};
+
+static bool mlx5_fpga_is_ipsec_device(struct mlx5_core_dev *mdev)
+{
+ if (!mdev->fpga || !MLX5_CAP_GEN(mdev, fpga))
+ return false;
+
+ if (MLX5_CAP_FPGA(mdev, ieee_vendor_id) !=
+ MLX5_FPGA_CAP_SANDBOX_VENDOR_ID_MLNX)
+ return false;
+
+ if (MLX5_CAP_FPGA(mdev, sandbox_product_id) !=
+ MLX5_FPGA_CAP_SANDBOX_PRODUCT_ID_IPSEC)
+ return false;
+
+ return true;
+}
+
+static void mlx5_fpga_ipsec_send_complete(struct mlx5_fpga_conn *conn,
+ struct mlx5_fpga_device *fdev,
+ struct mlx5_fpga_dma_buf *buf,
+ u8 status)
+{
+ struct mlx5_ipsec_command_context *context;
+
+ if (status) {
+ context = container_of(buf, struct mlx5_ipsec_command_context,
+ buf);
+ mlx5_fpga_warn(fdev, "IPSec command send failed with status %u\n",
+ status);
+ context->status = MLX5_FPGA_IPSEC_SACMD_SEND_FAIL;
+ complete(&context->complete);
+ }
+}
+
+static inline int syndrome_to_errno(enum mlx5_ipsec_response_syndrome syndrome)
+{
+ switch (syndrome) {
+ case MLX5_IPSEC_RESPONSE_SUCCESS:
+ return 0;
+ case MLX5_IPSEC_RESPONSE_SADB_ISSUE:
+ return -EEXIST;
+ case MLX5_IPSEC_RESPONSE_ILLEGAL_REQUEST:
+ return -EINVAL;
+ case MLX5_IPSEC_RESPONSE_WRITE_RESPONSE_ISSUE:
+ return -EIO;
+ }
+ return -EIO;
+}
+
+static void mlx5_fpga_ipsec_recv(void *cb_arg, struct mlx5_fpga_dma_buf *buf)
+{
+ struct mlx5_ipsec_sadb_resp *resp = buf->sg[0].data;
+ struct mlx5_ipsec_command_context *context;
+ enum mlx5_ipsec_response_syndrome syndrome;
+ struct mlx5_fpga_device *fdev = cb_arg;
+ unsigned long flags;
+
+ if (buf->sg[0].size < sizeof(*resp)) {
+ mlx5_fpga_warn(fdev, "Short receive from FPGA IPSec: %u < %zu bytes\n",
+ buf->sg[0].size, sizeof(*resp));
+ return;
+ }
+
+ mlx5_fpga_dbg(fdev, "mlx5_ipsec recv_cb syndrome %08x sa_id %x\n",
+ ntohl(resp->syndrome), ntohl(resp->sw_sa_handle));
+
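+ /* Responses arrive in command order, so the oldest pending command
+ * owns this response; the SA handle check below guards the assumption.
+ */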
+ spin_lock_irqsave(&fdev->ipsec->pending_cmds_lock, flags);
+ context = list_first_entry_or_null(&fdev->ipsec->pending_cmds,
+ struct mlx5_ipsec_command_context,
+ list);
+ if (context)
+ list_del(&context->list);
+ spin_unlock_irqrestore(&fdev->ipsec->pending_cmds_lock, flags);
+
+ if (!context) {
+ mlx5_fpga_warn(fdev, "Received IPSec offload response without pending command request\n");
+ return;
+ }
+ mlx5_fpga_dbg(fdev, "Handling response for %p\n", context);
+
+ if (context->sa.sw_sa_handle != resp->sw_sa_handle) {
+ mlx5_fpga_err(fdev, "mismatch SA handle. cmd 0x%08x vs resp 0x%08x\n",
+ ntohl(context->sa.sw_sa_handle),
+ ntohl(resp->sw_sa_handle));
+ return;
+ }
+
+ syndrome = ntohl(resp->syndrome);
+ context->status_code = syndrome_to_errno(syndrome);
+ context->status = MLX5_FPGA_IPSEC_SACMD_COMPLETE;
+
+ if (context->status_code)
+ mlx5_fpga_warn(fdev, "IPSec SADB command failed with syndrome %08x\n",
+ syndrome);
+ complete(&context->complete);
+}
+
+void *mlx5_fpga_ipsec_sa_cmd_exec(struct mlx5_core_dev *mdev,
+ struct mlx5_accel_ipsec_sa *cmd)
+{
+ struct mlx5_ipsec_command_context *context;
+ struct mlx5_fpga_device *fdev = mdev->fpga;
+ unsigned long flags;
+ int res = 0;
+
+ BUILD_BUG_ON((sizeof(struct mlx5_accel_ipsec_sa) & 3) != 0);
+ if (!fdev || !fdev->ipsec)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ context = kzalloc(sizeof(*context), GFP_ATOMIC);
+ if (!context)
+ return ERR_PTR(-ENOMEM);
+
+ memcpy(&context->sa, cmd, sizeof(*cmd));
+ context->buf.complete = mlx5_fpga_ipsec_send_complete;
+ context->buf.sg[0].size = sizeof(context->sa);
+ context->buf.sg[0].data = &context->sa;
+ init_completion(&context->complete);
+ context->dev = fdev;
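+ /* Queue on the pending list before sending, so the response handler
+ * can find this context even if the FPGA answers immediately.
+ */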
+ spin_lock_irqsave(&fdev->ipsec->pending_cmds_lock, flags);
+ list_add_tail(&context->list, &fdev->ipsec->pending_cmds);
+ spin_unlock_irqrestore(&fdev->ipsec->pending_cmds_lock, flags);
+
+ context->status = MLX5_FPGA_IPSEC_SACMD_PENDING;
+
+ res = mlx5_fpga_sbu_conn_sendmsg(fdev->ipsec->conn, &context->buf);
+ if (res) {
+ mlx5_fpga_warn(fdev, "Failure sending IPSec command: %d\n",
+ res);
+ spin_lock_irqsave(&fdev->ipsec->pending_cmds_lock, flags);
+ list_del(&context->list);
+ spin_unlock_irqrestore(&fdev->ipsec->pending_cmds_lock, flags);
+ kfree(context);
+ return ERR_PTR(res);
+ }
+ /* Context will be freed by wait func after completion */
+ return context;
+}
+
+int mlx5_fpga_ipsec_sa_cmd_wait(void *ctx)
+{
+ struct mlx5_ipsec_command_context *context = ctx;
+ int res;
+
+ res = wait_for_completion_killable(&context->complete);
+ if (res) {
+ mlx5_fpga_warn(context->dev, "Failure waiting for IPSec command response\n");
+ return -EINTR;
+ }
+
+ if (context->status == MLX5_FPGA_IPSEC_SACMD_COMPLETE)
+ res = context->status_code;
+ else
+ res = -EIO;
+
+ kfree(context);
+ return res;
+}
+
+u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_fpga_device *fdev = mdev->fpga;
+ u32 ret = 0;
+
+ if (mlx5_fpga_is_ipsec_device(mdev))
+ ret |= MLX5_ACCEL_IPSEC_DEVICE;
+ else
+ return ret;
+
+ if (!fdev->ipsec)
+ return ret;
+
+ if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, esp))
+ ret |= MLX5_ACCEL_IPSEC_ESP;
+
+ if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, ipv6))
+ ret |= MLX5_ACCEL_IPSEC_IPV6;
+
+ if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, lso))
+ ret |= MLX5_ACCEL_IPSEC_LSO;
+
+ return ret;
+}
+
+unsigned int mlx5_fpga_ipsec_counters_count(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_fpga_device *fdev = mdev->fpga;
+
+ if (!fdev || !fdev->ipsec)
+ return 0;
+
+ return MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps,
+ number_of_ipsec_counters);
+}
+
+int mlx5_fpga_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters,
+ unsigned int counters_count)
+{
+ struct mlx5_fpga_device *fdev = mdev->fpga;
+ unsigned int i;
+ u32 *data;
+ u32 count;
+ u64 addr;
+ int ret;
+
+ if (!fdev || !fdev->ipsec)
+ return 0;
+
+ addr = (u64)MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps,
+ ipsec_counters_addr_low) +
+ ((u64)MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps,
+ ipsec_counters_addr_high) << 32);
+
+ count = mlx5_fpga_ipsec_counters_count(mdev);
+
+ data = kzalloc(sizeof(u32) * count * 2, GFP_KERNEL);
+ if (!data) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = mlx5_fpga_mem_read(fdev, count * sizeof(u64), addr, data,
+ MLX5_FPGA_ACCESS_TYPE_DONTCARE);
+ if (ret < 0) {
+ mlx5_fpga_err(fdev, "Failed to read IPSec counters from HW: %d\n",
+ ret);
+ goto out;
+ }
+ ret = 0;
+
+ if (count > counters_count)
+ count = counters_count;
+
+ /* Each counter is low word, then high. But each word is big-endian */
+ for (i = 0; i < count; i++)
+ counters[i] = (u64)ntohl(data[i * 2]) |
+ ((u64)ntohl(data[i * 2 + 1]) << 32);
+
+out:
+ kfree(data);
+ return ret;
+}
+
+int mlx5_fpga_ipsec_init(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_fpga_conn_attr init_attr = {0};
+ struct mlx5_fpga_device *fdev = mdev->fpga;
+ struct mlx5_fpga_conn *conn;
+ int err;
+
+ if (!mlx5_fpga_is_ipsec_device(mdev))
+ return 0;
+
+ fdev->ipsec = kzalloc(sizeof(*fdev->ipsec), GFP_KERNEL);
+ if (!fdev->ipsec)
+ return -ENOMEM;
+
+ err = mlx5_fpga_get_sbu_caps(fdev, sizeof(fdev->ipsec->caps),
+ fdev->ipsec->caps);
+ if (err) {
+ mlx5_fpga_err(fdev, "Failed to retrieve IPSec extended capabilities: %d\n",
+ err);
+ goto error;
+ }
+
+ INIT_LIST_HEAD(&fdev->ipsec->pending_cmds);
+ spin_lock_init(&fdev->ipsec->pending_cmds_lock);
+
+ init_attr.rx_size = SBU_QP_QUEUE_SIZE;
+ init_attr.tx_size = SBU_QP_QUEUE_SIZE;
+ init_attr.recv_cb = mlx5_fpga_ipsec_recv;
+ init_attr.cb_arg = fdev;
+ conn = mlx5_fpga_sbu_conn_create(fdev, &init_attr);
+ if (IS_ERR(conn)) {
+ err = PTR_ERR(conn);
+ mlx5_fpga_err(fdev, "Error creating IPSec command connection %d\n",
+ err);
+ goto error;
+ }
+ fdev->ipsec->conn = conn;
+ return 0;
+
+error:
+ kfree(fdev->ipsec);
+ fdev->ipsec = NULL;
+ return err;
+}
+
+void mlx5_fpga_ipsec_cleanup(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_fpga_device *fdev = mdev->fpga;
+
+ if (!mlx5_fpga_is_ipsec_device(mdev))
+ return;
+
+ mlx5_fpga_sbu_conn_destroy(fdev->ipsec->conn);
+ kfree(fdev->ipsec);
+ fdev->ipsec = NULL;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h
new file mode 100644
index 000000000000..26a3e4b56972
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef __MLX5_FPGA_IPSEC_H__
+#define __MLX5_FPGA_IPSEC_H__
+
+#include "accel/ipsec.h"
+
+#ifdef CONFIG_MLX5_FPGA
+
+void *mlx5_fpga_ipsec_sa_cmd_exec(struct mlx5_core_dev *mdev,
+ struct mlx5_accel_ipsec_sa *cmd);
+int mlx5_fpga_ipsec_sa_cmd_wait(void *context);
+
+u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev);
+unsigned int mlx5_fpga_ipsec_counters_count(struct mlx5_core_dev *mdev);
+int mlx5_fpga_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters,
+ unsigned int counters_count);
+
+int mlx5_fpga_ipsec_init(struct mlx5_core_dev *mdev);
+void mlx5_fpga_ipsec_cleanup(struct mlx5_core_dev *mdev);
+
+#else
+
+static inline void *mlx5_fpga_ipsec_sa_cmd_exec(struct mlx5_core_dev *mdev,
+ struct mlx5_accel_ipsec_sa *cmd)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline int mlx5_fpga_ipsec_sa_cmd_wait(void *context)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev)
+{
+ return 0;
+}
+
+static inline unsigned int
+mlx5_fpga_ipsec_counters_count(struct mlx5_core_dev *mdev)
+{
+ return 0;
+}
+
+static inline int mlx5_fpga_ipsec_counters_read(struct mlx5_core_dev *mdev,
+ u64 *counters,
+ unsigned int counters_count)
+{
+ return 0;
+}
+
+static inline int mlx5_fpga_ipsec_init(struct mlx5_core_dev *mdev)
+{
+ return 0;
+}
+
+static inline void mlx5_fpga_ipsec_cleanup(struct mlx5_core_dev *mdev)
+{
+}
+
+#endif /* CONFIG_MLX5_FPGA */
+
+#endif /* __MLX5_FPGA_IPSEC_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.c
new file mode 100644
index 000000000000..3c11d6e2160a
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.c
@@ -0,0 +1,164 @@
+/*
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/mlx5/device.h>
+
+#include "fpga/core.h"
+#include "fpga/conn.h"
+#include "fpga/sdk.h"
+
+struct mlx5_fpga_conn *
+mlx5_fpga_sbu_conn_create(struct mlx5_fpga_device *fdev,
+ struct mlx5_fpga_conn_attr *attr)
+{
+ return mlx5_fpga_conn_create(fdev, attr, MLX5_FPGA_QPC_QP_TYPE_SANDBOX_QP);
+}
+EXPORT_SYMBOL(mlx5_fpga_sbu_conn_create);
+
+void mlx5_fpga_sbu_conn_destroy(struct mlx5_fpga_conn *conn)
+{
+ mlx5_fpga_conn_destroy(conn);
+}
+EXPORT_SYMBOL(mlx5_fpga_sbu_conn_destroy);
+
+int mlx5_fpga_sbu_conn_sendmsg(struct mlx5_fpga_conn *conn,
+ struct mlx5_fpga_dma_buf *buf)
+{
+ return mlx5_fpga_conn_send(conn, buf);
+}
+EXPORT_SYMBOL(mlx5_fpga_sbu_conn_sendmsg);
+
+static int mlx5_fpga_mem_read_i2c(struct mlx5_fpga_device *fdev, size_t size,
+ u64 addr, u8 *buf)
+{
+ size_t max_size = MLX5_FPGA_ACCESS_REG_SIZE_MAX;
+ size_t bytes_done = 0;
+ u8 actual_size;
+ int err = 0;
+
+ if (!fdev->mdev)
+ return -ENOTCONN;
+
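+ /* The I2C register interface moves at most
+ * MLX5_FPGA_ACCESS_REG_SIZE_MAX bytes per access, so split the
+ * transfer into chunks.
+ */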
+ while (bytes_done < size) {
+ actual_size = min(max_size, (size - bytes_done));
+
+ err = mlx5_fpga_access_reg(fdev->mdev, actual_size,
+ addr + bytes_done,
+ buf + bytes_done, false);
+ if (err) {
+ mlx5_fpga_err(fdev, "Failed to read over I2C: %d\n",
+ err);
+ break;
+ }
+
+ bytes_done += actual_size;
+ }
+
+ return err;
+}
+
+static int mlx5_fpga_mem_write_i2c(struct mlx5_fpga_device *fdev, size_t size,
+ u64 addr, u8 *buf)
+{
+ size_t max_size = MLX5_FPGA_ACCESS_REG_SIZE_MAX;
+ size_t bytes_done = 0;
+ u8 actual_size;
+ int err = 0;
+
+ if (!fdev->mdev)
+ return -ENOTCONN;
+
+ while (bytes_done < size) {
+ actual_size = min(max_size, (size - bytes_done));
+
+ err = mlx5_fpga_access_reg(fdev->mdev, actual_size,
+ addr + bytes_done,
+ buf + bytes_done, true);
+ if (err) {
+ mlx5_fpga_err(fdev, "Failed to write FPGA crspace\n");
+ break;
+ }
+
+ bytes_done += actual_size;
+ }
+
+ return err;
+}
+
+int mlx5_fpga_mem_read(struct mlx5_fpga_device *fdev, size_t size, u64 addr,
+ void *buf, enum mlx5_fpga_access_type access_type)
+{
+ int ret;
+
+ switch (access_type) {
+ case MLX5_FPGA_ACCESS_TYPE_I2C:
+ ret = mlx5_fpga_mem_read_i2c(fdev, size, addr, buf);
+ if (ret)
+ return ret;
+ break;
+ default:
+ mlx5_fpga_warn(fdev, "Unexpected read access_type %u\n",
+ access_type);
+ return -EACCES;
+ }
+
+ return size;
+}
+EXPORT_SYMBOL(mlx5_fpga_mem_read);
+
+int mlx5_fpga_mem_write(struct mlx5_fpga_device *fdev, size_t size, u64 addr,
+ void *buf, enum mlx5_fpga_access_type access_type)
+{
+ int ret;
+
+ switch (access_type) {
+ case MLX5_FPGA_ACCESS_TYPE_I2C:
+ ret = mlx5_fpga_mem_write_i2c(fdev, size, addr, buf);
+ if (ret)
+ return ret;
+ break;
+ default:
+ mlx5_fpga_warn(fdev, "Unexpected write access_type %u\n",
+ access_type);
+ return -EACCES;
+ }
+
+ return size;
+}
+EXPORT_SYMBOL(mlx5_fpga_mem_write);
+
+int mlx5_fpga_get_sbu_caps(struct mlx5_fpga_device *fdev, int size, void *buf)
+{
+ return mlx5_fpga_sbu_caps(fdev->mdev, buf, size);
+}
+EXPORT_SYMBOL(mlx5_fpga_get_sbu_caps);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.h
new file mode 100644
index 000000000000..baa537e54a49
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.h
@@ -0,0 +1,204 @@
+/*
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef MLX5_FPGA_SDK_H
+#define MLX5_FPGA_SDK_H
+
+#include <linux/types.h>
+#include <linux/dma-direction.h>
+
+/**
+ * DOC: Innova SDK
+ * This header defines the in-kernel API for Innova FPGA client drivers.
+ */
+
+enum mlx5_fpga_access_type {
+	MLX5_FPGA_ACCESS_TYPE_I2C = 0x0,
+	/* DONTCARE deliberately aliases I2C: callers with no preference
+	 * currently get the I2C access path.
+	 */
+	MLX5_FPGA_ACCESS_TYPE_DONTCARE = 0x0,
+};
+
+struct mlx5_fpga_conn;
+struct mlx5_fpga_device;
+
+/**
+ * struct mlx5_fpga_dma_entry - A scatter-gather DMA entry
+ */
+struct mlx5_fpga_dma_entry {
+ /** @data: Virtual address pointer to the data */
+ void *data;
+ /** @size: Size in bytes of the data */
+ unsigned int size;
+ /** @dma_addr: Private member. Physical DMA-mapped address of the data */
+ dma_addr_t dma_addr;
+};
+
+/**
+ * struct mlx5_fpga_dma_buf - A packet buffer
+ * May contain up to 2 scatter-gather data entries
+ */
+struct mlx5_fpga_dma_buf {
+ /** @dma_dir: DMA direction */
+ enum dma_data_direction dma_dir;
+ /** @sg: Scatter-gather entries pointing to the data in memory */
+ struct mlx5_fpga_dma_entry sg[2];
+ /** @list: Item in SQ backlog, for TX packets */
+ struct list_head list;
+ /**
+ * @complete: Completion routine, for TX packets
+ * @conn: FPGA Connection this packet was sent to
+ * @fdev: FPGA device this packet was sent to
+ * @buf: The packet buffer
+ * @status: 0 if successful, or an error code otherwise
+ */
+ void (*complete)(struct mlx5_fpga_conn *conn,
+ struct mlx5_fpga_device *fdev,
+ struct mlx5_fpga_dma_buf *buf, u8 status);
+};
+
+/**
+ * struct mlx5_fpga_conn_attr - FPGA connection attributes
+ * Describes the attributes of a connection
+ */
+struct mlx5_fpga_conn_attr {
+ /** @tx_size: Size of connection TX queue, in packets */
+ unsigned int tx_size;
+ /** @rx_size: Size of connection RX queue, in packets */
+ unsigned int rx_size;
+ /**
+ * @recv_cb: Callback function which is called for received packets
+ * @cb_arg: The value provided in mlx5_fpga_conn_attr.cb_arg
+ * @buf: A buffer containing a received packet
+ *
+ * buf is guaranteed to contain only a single scatter-gather entry.
+ * The size of the actual packet received is specified in buf.sg[0].size.
+ * When this callback returns, the packet buffer may be reused for
+ * subsequent receives.
+ */
+ void (*recv_cb)(void *cb_arg, struct mlx5_fpga_dma_buf *buf);
+ void *cb_arg;
+};
+
+/**
+ * mlx5_fpga_sbu_conn_create() - Initialize a new FPGA SBU connection
+ * @fdev: The FPGA device
+ * @attr: Attributes of the new connection
+ *
+ * Sets up a new FPGA SBU connection with the specified attributes.
+ * The receive callback function may be called for incoming messages even
+ * before this function returns.
+ *
+ * The caller must eventually destroy the connection by calling
+ * mlx5_fpga_sbu_conn_destroy.
+ *
+ * Return: A new connection, or ERR_PTR() error value otherwise.
+ */
+struct mlx5_fpga_conn *
+mlx5_fpga_sbu_conn_create(struct mlx5_fpga_device *fdev,
+ struct mlx5_fpga_conn_attr *attr);
+
+/**
+ * mlx5_fpga_sbu_conn_destroy() - Destroy an FPGA SBU connection
+ * @conn: The FPGA SBU connection to destroy
+ *
+ * Cleans up an FPGA SBU connection which was previously created with
+ * mlx5_fpga_sbu_conn_create.
+ */
+void mlx5_fpga_sbu_conn_destroy(struct mlx5_fpga_conn *conn);
+
+/**
+ * mlx5_fpga_sbu_conn_sendmsg() - Queue the transmission of a packet
+ * @conn: The FPGA SBU connection to send on
+ * @buf: The packet buffer
+ *
+ * Queues a packet for transmission over an FPGA SBU connection.
+ * The buffer must not be modified or freed until completion.
+ * Upon completion, the buffer's complete() callback is invoked, indicating the
+ * success or error status of the transmission.
+ *
+ * Return: 0 if successful, or an error value otherwise.
+ */
+int mlx5_fpga_sbu_conn_sendmsg(struct mlx5_fpga_conn *conn,
+ struct mlx5_fpga_dma_buf *buf);
+
+/**
+ * mlx5_fpga_mem_read() - Read from FPGA memory address space
+ * @fdev: The FPGA device
+ * @size: Size of chunk to read, in bytes
+ * @addr: Starting address to read from, in FPGA address space
+ * @buf: Buffer to read into
+ * @access_type: Method for reading
+ *
+ * Reads from the specified address into the specified buffer.
+ * The address may point to configuration space or to DDR.
+ * Large reads may be performed internally as several non-atomic operations.
+ * This function may sleep, so it must not be called from atomic contexts.
+ *
+ * Return: the number of bytes read if successful, or a negative error
+ * value otherwise.
+ */
+int mlx5_fpga_mem_read(struct mlx5_fpga_device *fdev, size_t size, u64 addr,
+ void *buf, enum mlx5_fpga_access_type access_type);
+
+/**
+ * mlx5_fpga_mem_write() - Write to FPGA memory address space
+ * @fdev: The FPGA device
+ * @size: Size of chunk to write, in bytes
+ * @addr: Starting address to write to, in FPGA address space
+ * @buf: Buffer which contains data to write
+ * @access_type: Method for writing
+ *
+ * Writes the specified buffer data to FPGA memory at the specified address.
+ * The address may point to configuration space or to DDR.
+ * Large writes may be performed internally as several non-atomic operations.
+ * This function may sleep, so it must not be called from atomic contexts.
+ *
+ * Return: the number of bytes written if successful, or a negative error
+ * value otherwise.
+ */
+int mlx5_fpga_mem_write(struct mlx5_fpga_device *fdev, size_t size, u64 addr,
+ void *buf, enum mlx5_fpga_access_type access_type);
+
+/**
+ * mlx5_fpga_get_sbu_caps() - Read the SBU capabilities
+ * @fdev: The FPGA device
+ * @size: Size of the buffer to read into
+ * @buf: Buffer to read the capabilities into
+ *
+ * Reads the FPGA SBU capabilities into the specified buffer.
+ * The format of the capabilities buffer is SBU-dependent.
+ *
+ * Return: 0 if successful
+ *         -EINVAL if the buffer is not large enough to contain the SBU caps,
+ *         or another negative error value otherwise.
+ */
+int mlx5_fpga_get_sbu_caps(struct mlx5_fpga_device *fdev, int size, void *buf);
+
+#endif /* MLX5_FPGA_SDK_H */
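A sketch of how a hypothetical SBU client would use this header (the example_* names are invented for illustration; the TX buffer must stay allocated until its completion callback runs):

static void example_recv(void *cb_arg, struct mlx5_fpga_dma_buf *buf)
{
	/* buf->sg[0] holds the whole received packet; the buffer may be
	 * reused for RX once this callback returns. */
}

static void example_complete(struct mlx5_fpga_conn *conn,
			     struct mlx5_fpga_device *fdev,
			     struct mlx5_fpga_dma_buf *buf, u8 status)
{
	/* TX buffer ownership returns to the client here; a non-zero
	 * status indicates a transmission error. */
}

static int example_send(struct mlx5_fpga_device *fdev,
			struct mlx5_fpga_dma_buf *buf,
			void *data, unsigned int len)
{
	struct mlx5_fpga_conn_attr attr = {
		.tx_size = 64,
		.rx_size = 64,
		.recv_cb = example_recv,
	};
	struct mlx5_fpga_conn *conn;

	conn = mlx5_fpga_sbu_conn_create(fdev, &attr);
	if (IS_ERR(conn))
		return PTR_ERR(conn);

	buf->sg[0].data = data;
	buf->sg[0].size = len;
	buf->dma_dir = DMA_TO_DEVICE;
	buf->complete = example_complete;

	/* buf must stay allocated and unmodified until complete() runs. */
	return mlx5_fpga_sbu_conn_sendmsg(conn, buf);
}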
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index fcec7bedd3cd..e750f07793b8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -78,28 +78,33 @@ int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev,
MLX5_CMD_OP_CREATE_FLOW_TABLE);
MLX5_SET(create_flow_table_in, in, table_type, type);
- MLX5_SET(create_flow_table_in, in, level, level);
- MLX5_SET(create_flow_table_in, in, log_size, log_size);
+ MLX5_SET(create_flow_table_in, in, flow_table_context.level, level);
+ MLX5_SET(create_flow_table_in, in, flow_table_context.log_size, log_size);
if (vport) {
MLX5_SET(create_flow_table_in, in, vport_number, vport);
MLX5_SET(create_flow_table_in, in, other_vport, 1);
}
- MLX5_SET(create_flow_table_in, in, decap_en, en_encap_decap);
- MLX5_SET(create_flow_table_in, in, encap_en, en_encap_decap);
+ MLX5_SET(create_flow_table_in, in, flow_table_context.decap_en,
+ en_encap_decap);
+ MLX5_SET(create_flow_table_in, in, flow_table_context.encap_en,
+ en_encap_decap);
switch (op_mod) {
case FS_FT_OP_MOD_NORMAL:
if (next_ft) {
- MLX5_SET(create_flow_table_in, in, table_miss_mode, 1);
- MLX5_SET(create_flow_table_in, in, table_miss_id, next_ft->id);
+ MLX5_SET(create_flow_table_in, in,
+ flow_table_context.table_miss_action, 1);
+ MLX5_SET(create_flow_table_in, in,
+ flow_table_context.table_miss_id, next_ft->id);
}
break;
case FS_FT_OP_MOD_LAG_DEMUX:
MLX5_SET(create_flow_table_in, in, op_mod, 0x1);
if (next_ft)
- MLX5_SET(create_flow_table_in, in, lag_master_next_table_id,
+ MLX5_SET(create_flow_table_in, in,
+ flow_table_context.lag_master_next_table_id,
next_ft->id);
break;
}
@@ -146,10 +151,10 @@ int mlx5_cmd_modify_flow_table(struct mlx5_core_dev *dev,
MLX5_MODIFY_FLOW_TABLE_LAG_NEXT_TABLE_ID);
if (next_ft) {
MLX5_SET(modify_flow_table_in, in,
- lag_master_next_table_id, next_ft->id);
+ flow_table_context.lag_master_next_table_id, next_ft->id);
} else {
MLX5_SET(modify_flow_table_in, in,
- lag_master_next_table_id, 0);
+ flow_table_context.lag_master_next_table_id, 0);
}
} else {
if (ft->vport) {
@@ -160,11 +165,14 @@ int mlx5_cmd_modify_flow_table(struct mlx5_core_dev *dev,
MLX5_SET(modify_flow_table_in, in, modify_field_select,
MLX5_MODIFY_FLOW_TABLE_MISS_TABLE_ID);
if (next_ft) {
- MLX5_SET(modify_flow_table_in, in, table_miss_mode, 1);
- MLX5_SET(modify_flow_table_in, in, table_miss_id,
+ MLX5_SET(modify_flow_table_in, in,
+ flow_table_context.table_miss_action, 1);
+ MLX5_SET(modify_flow_table_in, in,
+ flow_table_context.table_miss_id,
next_ft->id);
} else {
- MLX5_SET(modify_flow_table_in, in, table_miss_mode, 0);
+ MLX5_SET(modify_flow_table_in, in,
+ flow_table_context.table_miss_action, 0);
}
}
@@ -232,11 +240,9 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
u32 *in;
int err;
- in = mlx5_vzalloc(inlen);
- if (!in) {
- mlx5_core_warn(dev, "failed to allocate inbox\n");
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
return -ENOMEM;
- }
MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
MLX5_SET(set_fte_in, in, op_mod, opmod);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 8f5125ccd8d4..e8690fe46bf2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -104,6 +104,7 @@ struct node_caps {
size_t arr_sz;
long *caps;
};
+
static struct init_tree_node {
enum fs_node_type type;
struct init_tree_node *children;
@@ -376,11 +377,9 @@ static void del_rule(struct fs_node *node)
int err;
bool update_fte = false;
- match_value = mlx5_vzalloc(match_len);
- if (!match_value) {
- mlx5_core_warn(dev, "failed to allocate inbox\n");
+ match_value = kvzalloc(match_len, GFP_KERNEL);
+ if (!match_value)
return;
- }
fs_get_obj(rule, node);
fs_get_obj(fte, rule->node.parent);
@@ -1157,7 +1156,7 @@ static struct mlx5_flow_group *create_autogroup(struct mlx5_flow_table *ft,
if (!ft->autogroup.active)
return ERR_PTR(-ENOENT);
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return ERR_PTR(-ENOMEM);
@@ -1777,7 +1776,7 @@ static struct mlx5_flow_root_namespace *create_root_ns(struct mlx5_flow_steering
struct mlx5_flow_namespace *ns;
/* Create the root namespace */
- root_ns = mlx5_vzalloc(sizeof(*root_ns));
+ root_ns = kvzalloc(sizeof(*root_ns), GFP_KERNEL);
if (!root_ns)
return NULL;
@@ -1860,7 +1859,6 @@ static int create_anchor_flow_table(struct mlx5_flow_steering *steering)
static int init_root_ns(struct mlx5_flow_steering *steering)
{
-
steering->root_ns = create_root_ns(steering, FS_FT_NIC_RX);
if (!steering->root_ns)
goto cleanup;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index 1bc14d0fded8..fa33d59ab485 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -34,6 +34,7 @@
#include <linux/mlx5/cmd.h>
#include <linux/module.h>
#include "mlx5_core.h"
+#include "../../mlxfw/mlxfw.h"
static int mlx5_cmd_query_adapter(struct mlx5_core_dev *dev, u32 *out,
int outlen)
@@ -195,3 +196,298 @@ int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev)
MLX5_SET(teardown_hca_in, in, opcode, MLX5_CMD_OP_TEARDOWN_HCA);
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
+
+int mlx5_cmd_force_teardown_hca(struct mlx5_core_dev *dev)
+{
+ u32 out[MLX5_ST_SZ_DW(teardown_hca_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(teardown_hca_in)] = {0};
+ int force_state;
+ int ret;
+
+ if (!MLX5_CAP_GEN(dev, force_teardown)) {
+ mlx5_core_dbg(dev, "force teardown is not supported in the firmware\n");
+ return -EOPNOTSUPP;
+ }
+
+ MLX5_SET(teardown_hca_in, in, opcode, MLX5_CMD_OP_TEARDOWN_HCA);
+ MLX5_SET(teardown_hca_in, in, profile, MLX5_TEARDOWN_HCA_IN_PROFILE_FORCE_CLOSE);
+
+ ret = mlx5_cmd_exec_polling(dev, in, sizeof(in), out, sizeof(out));
+ if (ret)
+ return ret;
+
+ force_state = MLX5_GET(teardown_hca_out, out, force_state);
+ if (force_state == MLX5_TEARDOWN_HCA_OUT_FORCE_STATE_FAIL) {
+ mlx5_core_err(dev, "teardown with force mode failed\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+enum mlx5_reg_mcc_instruction {
+ MLX5_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE = 0x01,
+ MLX5_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE = 0x02,
+ MLX5_REG_MCC_INSTRUCTION_UPDATE_COMPONENT = 0x03,
+ MLX5_REG_MCC_INSTRUCTION_VERIFY_COMPONENT = 0x04,
+ MLX5_REG_MCC_INSTRUCTION_ACTIVATE = 0x06,
+ MLX5_REG_MCC_INSTRUCTION_CANCEL = 0x08,
+};
+
+static int mlx5_reg_mcc_set(struct mlx5_core_dev *dev,
+			    enum mlx5_reg_mcc_instruction instr,
+ u16 component_index, u32 update_handle,
+ u32 component_size)
+{
+ u32 out[MLX5_ST_SZ_DW(mcc_reg)];
+ u32 in[MLX5_ST_SZ_DW(mcc_reg)];
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(mcc_reg, in, instruction, instr);
+ MLX5_SET(mcc_reg, in, component_index, component_index);
+ MLX5_SET(mcc_reg, in, update_handle, update_handle);
+ MLX5_SET(mcc_reg, in, component_size, component_size);
+
+ return mlx5_core_access_reg(dev, in, sizeof(in), out,
+ sizeof(out), MLX5_REG_MCC, 0, 1);
+}
+
+static int mlx5_reg_mcc_query(struct mlx5_core_dev *dev,
+ u32 *update_handle, u8 *error_code,
+ u8 *control_state)
+{
+ u32 out[MLX5_ST_SZ_DW(mcc_reg)];
+ u32 in[MLX5_ST_SZ_DW(mcc_reg)];
+ int err;
+
+ memset(in, 0, sizeof(in));
+ memset(out, 0, sizeof(out));
+ MLX5_SET(mcc_reg, in, update_handle, *update_handle);
+
+ err = mlx5_core_access_reg(dev, in, sizeof(in), out,
+ sizeof(out), MLX5_REG_MCC, 0, 0);
+ if (err)
+ goto out;
+
+ *update_handle = MLX5_GET(mcc_reg, out, update_handle);
+ *error_code = MLX5_GET(mcc_reg, out, error_code);
+ *control_state = MLX5_GET(mcc_reg, out, control_state);
+
+out:
+ return err;
+}
+
+static int mlx5_reg_mcda_set(struct mlx5_core_dev *dev,
+ u32 update_handle,
+ u32 offset, u16 size,
+ u8 *data)
+{
+ int err, in_size = MLX5_ST_SZ_BYTES(mcda_reg) + size;
+ u32 out[MLX5_ST_SZ_DW(mcda_reg)];
+ int i, j, dw_size = size >> 2;
+ __be32 data_element;
+ u32 *in;
+
+ in = kzalloc(in_size, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ MLX5_SET(mcda_reg, in, update_handle, update_handle);
+ MLX5_SET(mcda_reg, in, offset, offset);
+ MLX5_SET(mcda_reg, in, size, size);
+
+ for (i = 0; i < dw_size; i++) {
+ j = i * 4;
+ data_element = htonl(*(u32 *)&data[j]);
+ memcpy(MLX5_ADDR_OF(mcda_reg, in, data) + j, &data_element, 4);
+ }
+
+ err = mlx5_core_access_reg(dev, in, in_size, out,
+ sizeof(out), MLX5_REG_MCDA, 0, 1);
+ kfree(in);
+ return err;
+}
+
+static int mlx5_reg_mcqi_query(struct mlx5_core_dev *dev,
+ u16 component_index,
+ u32 *max_component_size,
+ u8 *log_mcda_word_size,
+ u16 *mcda_max_write_size)
+{
+ u32 out[MLX5_ST_SZ_DW(mcqi_reg) + MLX5_ST_SZ_DW(mcqi_cap)];
+ int offset = MLX5_ST_SZ_DW(mcqi_reg);
+ u32 in[MLX5_ST_SZ_DW(mcqi_reg)];
+ int err;
+
+ memset(in, 0, sizeof(in));
+ memset(out, 0, sizeof(out));
+
+ MLX5_SET(mcqi_reg, in, component_index, component_index);
+ MLX5_SET(mcqi_reg, in, data_size, MLX5_ST_SZ_BYTES(mcqi_cap));
+
+ err = mlx5_core_access_reg(dev, in, sizeof(in), out,
+ sizeof(out), MLX5_REG_MCQI, 0, 0);
+ if (err)
+ goto out;
+
+ *max_component_size = MLX5_GET(mcqi_cap, out + offset, max_component_size);
+ *log_mcda_word_size = MLX5_GET(mcqi_cap, out + offset, log_mcda_word_size);
+ *mcda_max_write_size = MLX5_GET(mcqi_cap, out + offset, mcda_max_write_size);
+
+out:
+ return err;
+}
+
+struct mlx5_mlxfw_dev {
+ struct mlxfw_dev mlxfw_dev;
+ struct mlx5_core_dev *mlx5_core_dev;
+};
+
+static int mlx5_component_query(struct mlxfw_dev *mlxfw_dev,
+ u16 component_index, u32 *p_max_size,
+ u8 *p_align_bits, u16 *p_max_write_size)
+{
+ struct mlx5_mlxfw_dev *mlx5_mlxfw_dev =
+ container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev);
+ struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev;
+
+ return mlx5_reg_mcqi_query(dev, component_index, p_max_size,
+ p_align_bits, p_max_write_size);
+}
+
+static int mlx5_fsm_lock(struct mlxfw_dev *mlxfw_dev, u32 *fwhandle)
+{
+ struct mlx5_mlxfw_dev *mlx5_mlxfw_dev =
+ container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev);
+ struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev;
+ u8 control_state, error_code;
+ int err;
+
+ *fwhandle = 0;
+ err = mlx5_reg_mcc_query(dev, fwhandle, &error_code, &control_state);
+ if (err)
+ return err;
+
+ if (control_state != MLXFW_FSM_STATE_IDLE)
+ return -EBUSY;
+
+ return mlx5_reg_mcc_set(dev, MLX5_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE,
+ 0, *fwhandle, 0);
+}
+
+static int mlx5_fsm_component_update(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
+ u16 component_index, u32 component_size)
+{
+ struct mlx5_mlxfw_dev *mlx5_mlxfw_dev =
+ container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev);
+ struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev;
+
+ return mlx5_reg_mcc_set(dev, MLX5_REG_MCC_INSTRUCTION_UPDATE_COMPONENT,
+ component_index, fwhandle, component_size);
+}
+
+static int mlx5_fsm_block_download(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
+ u8 *data, u16 size, u32 offset)
+{
+ struct mlx5_mlxfw_dev *mlx5_mlxfw_dev =
+ container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev);
+ struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev;
+
+ return mlx5_reg_mcda_set(dev, fwhandle, offset, size, data);
+}
+
+static int mlx5_fsm_component_verify(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
+ u16 component_index)
+{
+ struct mlx5_mlxfw_dev *mlx5_mlxfw_dev =
+ container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev);
+ struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev;
+
+ return mlx5_reg_mcc_set(dev, MLX5_REG_MCC_INSTRUCTION_VERIFY_COMPONENT,
+ component_index, fwhandle, 0);
+}
+
+static int mlx5_fsm_activate(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
+{
+ struct mlx5_mlxfw_dev *mlx5_mlxfw_dev =
+ container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev);
+ struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev;
+
+ return mlx5_reg_mcc_set(dev, MLX5_REG_MCC_INSTRUCTION_ACTIVATE, 0,
+ fwhandle, 0);
+}
+
+static int mlx5_fsm_query_state(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
+ enum mlxfw_fsm_state *fsm_state,
+ enum mlxfw_fsm_state_err *fsm_state_err)
+{
+ struct mlx5_mlxfw_dev *mlx5_mlxfw_dev =
+ container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev);
+ struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev;
+ u8 control_state, error_code;
+ int err;
+
+ err = mlx5_reg_mcc_query(dev, &fwhandle, &error_code, &control_state);
+ if (err)
+ return err;
+
+ *fsm_state = control_state;
+ *fsm_state_err = min_t(enum mlxfw_fsm_state_err, error_code,
+ MLXFW_FSM_STATE_ERR_MAX);
+ return 0;
+}
+
+static void mlx5_fsm_cancel(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
+{
+ struct mlx5_mlxfw_dev *mlx5_mlxfw_dev =
+ container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev);
+ struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev;
+
+ mlx5_reg_mcc_set(dev, MLX5_REG_MCC_INSTRUCTION_CANCEL, 0, fwhandle, 0);
+}
+
+static void mlx5_fsm_release(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
+{
+ struct mlx5_mlxfw_dev *mlx5_mlxfw_dev =
+ container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev);
+ struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev;
+
+ mlx5_reg_mcc_set(dev, MLX5_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE, 0,
+ fwhandle, 0);
+}
+
+static const struct mlxfw_dev_ops mlx5_mlxfw_dev_ops = {
+ .component_query = mlx5_component_query,
+ .fsm_lock = mlx5_fsm_lock,
+ .fsm_component_update = mlx5_fsm_component_update,
+ .fsm_block_download = mlx5_fsm_block_download,
+ .fsm_component_verify = mlx5_fsm_component_verify,
+ .fsm_activate = mlx5_fsm_activate,
+ .fsm_query_state = mlx5_fsm_query_state,
+ .fsm_cancel = mlx5_fsm_cancel,
+ .fsm_release = mlx5_fsm_release
+};
+
+int mlx5_firmware_flash(struct mlx5_core_dev *dev,
+ const struct firmware *firmware)
+{
+ struct mlx5_mlxfw_dev mlx5_mlxfw_dev = {
+ .mlxfw_dev = {
+ .ops = &mlx5_mlxfw_dev_ops,
+ .psid = dev->board_id,
+ .psid_size = strlen(dev->board_id),
+ },
+ .mlx5_core_dev = dev
+ };
+
+ if (!MLX5_CAP_GEN(dev, mcam_reg) ||
+ !MLX5_CAP_MCAM_REG(dev, mcqi) ||
+ !MLX5_CAP_MCAM_REG(dev, mcc) ||
+ !MLX5_CAP_MCAM_REG(dev, mcda)) {
+ pr_info("%s flashing isn't supported by the running FW\n", __func__);
+ return -EOPNOTSUPP;
+ }
+
+ return mlxfw_firmware_flash(&mlx5_mlxfw_dev.mlxfw_dev, firmware);
+}
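Caller-side, the new mlx5_firmware_flash() is what the ethtool flash_device path feeds; a hypothetical sketch of that flow (request_firmware() supplies the image from userspace):

#include <linux/firmware.h>

/* Hypothetical caller -- a sketch of the flash flow, not patch content. */
static int example_flash(struct mlx5_core_dev *dev, const char *name)
{
	const struct firmware *fw;
	int err;

	err = request_firmware(&fw, name, &dev->pdev->dev);
	if (err)
		return err;

	/* Drives the mlxfw FSM ops defined above: lock an update handle,
	 * start the component update, stream blocks via MCDA, verify,
	 * then activate the new image. */
	err = mlx5_firmware_flash(dev, fw);

	release_firmware(fw);
	return err;
}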
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index f27f84ffbc85..4b6b03d6297f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -67,6 +67,7 @@ enum {
enum {
MLX5_DROP_NEW_HEALTH_WORK,
+ MLX5_DROP_NEW_RECOVERY_WORK,
};
static u8 get_nic_state(struct mlx5_core_dev *dev)
@@ -111,14 +112,14 @@ static int in_fatal(struct mlx5_core_dev *dev)
return 0;
}
-void mlx5_enter_error_state(struct mlx5_core_dev *dev)
+void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force)
{
mutex_lock(&dev->intf_state_mutex);
if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
goto unlock;
mlx5_core_err(dev, "start\n");
- if (pci_channel_offline(dev->pdev) || in_fatal(dev)) {
+ if (pci_channel_offline(dev->pdev) || in_fatal(dev) || force) {
dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
trigger_cmd_completions(dev);
}
@@ -185,6 +186,7 @@ static void health_care(struct work_struct *work)
struct mlx5_core_health *health;
struct mlx5_core_dev *dev;
struct mlx5_priv *priv;
+ unsigned long flags;
health = container_of(work, struct mlx5_core_health, work);
priv = container_of(health, struct mlx5_priv, health);
@@ -192,13 +194,13 @@ static void health_care(struct work_struct *work)
mlx5_core_warn(dev, "handling bad device here\n");
mlx5_handle_bad_state(dev);
- spin_lock(&health->wq_lock);
- if (!test_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags))
+ spin_lock_irqsave(&health->wq_lock, flags);
+ if (!test_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags))
schedule_delayed_work(&health->recover_work, recover_delay);
else
dev_err(&dev->pdev->dev,
"new health works are not permitted at this stage\n");
- spin_unlock(&health->wq_lock);
+ spin_unlock_irqrestore(&health->wq_lock, flags);
}
static const char *hsynd_str(u8 synd)
@@ -269,6 +271,20 @@ static unsigned long get_next_poll_jiffies(void)
return next;
}
+void mlx5_trigger_health_work(struct mlx5_core_dev *dev)
+{
+ struct mlx5_core_health *health = &dev->priv.health;
+ unsigned long flags;
+
+ spin_lock_irqsave(&health->wq_lock, flags);
+ if (!test_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags))
+ queue_work(health->wq, &health->work);
+ else
+ dev_err(&dev->pdev->dev,
+ "new health works are not permitted at this stage\n");
+ spin_unlock_irqrestore(&health->wq_lock, flags);
+}
+
static void poll_health(unsigned long data)
{
struct mlx5_core_dev *dev = (struct mlx5_core_dev *)data;
@@ -293,13 +309,7 @@ static void poll_health(unsigned long data)
if (in_fatal(dev) && !health->sick) {
health->sick = true;
print_health_info(dev);
- spin_lock(&health->wq_lock);
- if (!test_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags))
- queue_work(health->wq, &health->work);
- else
- dev_err(&dev->pdev->dev,
- "new health works are not permitted at this stage\n");
- spin_unlock(&health->wq_lock);
+ mlx5_trigger_health_work(dev);
}
out:
@@ -313,6 +323,7 @@ void mlx5_start_health_poll(struct mlx5_core_dev *dev)
init_timer(&health->timer);
health->sick = 0;
clear_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
+ clear_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags);
health->health = &dev->iseg->health;
health->health_counter = &dev->iseg->health_counter;
@@ -332,14 +343,26 @@ void mlx5_stop_health_poll(struct mlx5_core_dev *dev)
void mlx5_drain_health_wq(struct mlx5_core_dev *dev)
{
struct mlx5_core_health *health = &dev->priv.health;
+ unsigned long flags;
- spin_lock(&health->wq_lock);
+ spin_lock_irqsave(&health->wq_lock, flags);
set_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
- spin_unlock(&health->wq_lock);
+ set_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags);
+ spin_unlock_irqrestore(&health->wq_lock, flags);
cancel_delayed_work_sync(&health->recover_work);
cancel_work_sync(&health->work);
}
+void mlx5_drain_health_recovery(struct mlx5_core_dev *dev)
+{
+	struct mlx5_core_health *health = &dev->priv.health;
+	unsigned long flags;
+
+	spin_lock_irqsave(&health->wq_lock, flags);
+	set_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags);
+	spin_unlock_irqrestore(&health->wq_lock, flags);
+	cancel_delayed_work_sync(&health->recover_work);
+}
+
void mlx5_health_cleanup(struct mlx5_core_dev *dev)
{
struct mlx5_core_health *health = &dev->priv.health;
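The two drain helpers now have distinct scopes; a sketch of the intended call sites (hypothetical functions, mirroring the unload and cleanup paths in main.c):

/* Sketch only -- illustrates the split between the two drop flags. */
static void example_unload_path(struct mlx5_core_dev *dev)
{
	/* Unload: stop scheduling new recovery work (it would race with
	 * teardown) but leave the health poll and its reporting alive. */
	mlx5_drain_health_recovery(dev);
}

static void example_cleanup_path(struct mlx5_core_dev *dev)
{
	/* Full cleanup: drop both health and recovery work, and wait for
	 * anything already queued to finish before the wq is destroyed. */
	mlx5_drain_health_wq(dev);
}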
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
new file mode 100644
index 000000000000..eb04e97d8765
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
@@ -0,0 +1,145 @@
+/*
+ * Copyright (c) 2017, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "en.h"
+#include "ipoib.h"
+
+static void mlx5i_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct mlx5e_priv *priv = mlx5i_epriv(dev);
+
+ mlx5e_ethtool_get_drvinfo(priv, drvinfo);
+}
+
+static void mlx5i_get_strings(struct net_device *dev,
+ uint32_t stringset, uint8_t *data)
+{
+ struct mlx5e_priv *priv = mlx5i_epriv(dev);
+
+ mlx5e_ethtool_get_strings(priv, stringset, data);
+}
+
+static int mlx5i_get_sset_count(struct net_device *dev, int sset)
+{
+ struct mlx5e_priv *priv = mlx5i_epriv(dev);
+
+ return mlx5e_ethtool_get_sset_count(priv, sset);
+}
+
+static void mlx5i_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats,
+ u64 *data)
+{
+ struct mlx5e_priv *priv = mlx5i_epriv(dev);
+
+ mlx5e_ethtool_get_ethtool_stats(priv, stats, data);
+}
+
+static int mlx5i_set_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *param)
+{
+ struct mlx5e_priv *priv = mlx5i_epriv(dev);
+
+ return mlx5e_ethtool_set_ringparam(priv, param);
+}
+
+static void mlx5i_get_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *param)
+{
+ struct mlx5e_priv *priv = mlx5i_epriv(dev);
+
+ mlx5e_ethtool_get_ringparam(priv, param);
+}
+
+static int mlx5i_set_channels(struct net_device *dev,
+ struct ethtool_channels *ch)
+{
+ struct mlx5e_priv *priv = mlx5i_epriv(dev);
+
+ return mlx5e_ethtool_set_channels(priv, ch);
+}
+
+static void mlx5i_get_channels(struct net_device *dev,
+ struct ethtool_channels *ch)
+{
+ struct mlx5e_priv *priv = mlx5i_epriv(dev);
+
+ mlx5e_ethtool_get_channels(priv, ch);
+}
+
+static int mlx5i_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *coal)
+{
+ struct mlx5e_priv *priv = mlx5i_epriv(netdev);
+
+ return mlx5e_ethtool_set_coalesce(priv, coal);
+}
+
+static int mlx5i_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *coal)
+{
+ struct mlx5e_priv *priv = mlx5i_epriv(netdev);
+
+ return mlx5e_ethtool_get_coalesce(priv, coal);
+}
+
+static int mlx5i_get_ts_info(struct net_device *netdev,
+ struct ethtool_ts_info *info)
+{
+ struct mlx5e_priv *priv = mlx5i_epriv(netdev);
+
+ return mlx5e_ethtool_get_ts_info(priv, info);
+}
+
+static int mlx5i_flash_device(struct net_device *netdev,
+ struct ethtool_flash *flash)
+{
+ struct mlx5e_priv *priv = mlx5i_epriv(netdev);
+
+ return mlx5e_ethtool_flash_device(priv, flash);
+}
+
+const struct ethtool_ops mlx5i_ethtool_ops = {
+ .get_drvinfo = mlx5i_get_drvinfo,
+ .get_strings = mlx5i_get_strings,
+ .get_sset_count = mlx5i_get_sset_count,
+ .get_ethtool_stats = mlx5i_get_ethtool_stats,
+ .get_ringparam = mlx5i_get_ringparam,
+ .set_ringparam = mlx5i_set_ringparam,
+ .flash_device = mlx5i_flash_device,
+ .get_channels = mlx5i_get_channels,
+ .set_channels = mlx5i_set_channels,
+ .get_coalesce = mlx5i_get_coalesce,
+ .set_coalesce = mlx5i_set_coalesce,
+ .get_ts_info = mlx5i_get_ts_info,
+};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index cc1858752e70..1ee5bce85901 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -36,20 +36,38 @@
#include "ipoib.h"
#define IB_DEFAULT_Q_KEY 0xb1b
+#define MLX5I_PARAMS_DEFAULT_LOG_RQ_SIZE 9
static int mlx5i_open(struct net_device *netdev);
static int mlx5i_close(struct net_device *netdev);
static int mlx5i_dev_init(struct net_device *dev);
static void mlx5i_dev_cleanup(struct net_device *dev);
+static int mlx5i_change_mtu(struct net_device *netdev, int new_mtu);
+static int mlx5i_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
static const struct net_device_ops mlx5i_netdev_ops = {
.ndo_open = mlx5i_open,
.ndo_stop = mlx5i_close,
.ndo_init = mlx5i_dev_init,
.ndo_uninit = mlx5i_dev_cleanup,
+ .ndo_change_mtu = mlx5i_change_mtu,
+ .ndo_do_ioctl = mlx5i_ioctl,
};
/* IPoIB mlx5 netdev profile */
+static void mlx5i_build_nic_params(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params)
+{
+ /* Override RQ params as IPoIB supports only LINKED LIST RQ for now */
+ mlx5e_set_rq_type_params(mdev, params, MLX5_WQ_TYPE_LINKED_LIST);
+
+	/* The default IPoIB RQ size is 512 (2^9) entries */
+ params->log_rq_size = is_kdump_kernel() ?
+ MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE :
+ MLX5I_PARAMS_DEFAULT_LOG_RQ_SIZE;
+
+ params->lro_en = false;
+}
/* Called directly after IPoIB netdevice was created to initialize SW structs */
static void mlx5i_init(struct mlx5_core_dev *mdev,
@@ -59,19 +77,18 @@ static void mlx5i_init(struct mlx5_core_dev *mdev,
{
struct mlx5e_priv *priv = mlx5i_epriv(netdev);
+ /* priv init */
priv->mdev = mdev;
priv->netdev = netdev;
priv->profile = profile;
priv->ppriv = ppriv;
+ priv->hard_mtu = MLX5_IB_GRH_BYTES + MLX5_IPOIB_HARD_LEN;
+ mutex_init(&priv->state_lock);
mlx5e_build_nic_params(mdev, &priv->channels.params, profile->max_nch(mdev));
+ mlx5i_build_nic_params(mdev, &priv->channels.params);
- /* Override RQ params as IPoIB supports only LINKED LIST RQ for now */
- mlx5e_set_rq_type_params(mdev, &priv->channels.params, MLX5_WQ_TYPE_LINKED_LIST);
- priv->channels.params.lro_en = false;
-
- mutex_init(&priv->state_lock);
-
+ /* netdev init */
netdev->hw_features |= NETIF_F_SG;
netdev->hw_features |= NETIF_F_IP_CSUM;
netdev->hw_features |= NETIF_F_IPV6_CSUM;
@@ -82,6 +99,7 @@ static void mlx5i_init(struct mlx5_core_dev *mdev,
netdev->hw_features |= NETIF_F_RXHASH;
netdev->netdev_ops = &mlx5i_netdev_ops;
+ netdev->ethtool_ops = &mlx5i_ethtool_ops;
}
/* Called directly before IPoIB netdevice is destroyed to cleanup SW structs */
@@ -102,7 +120,7 @@ static int mlx5i_create_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core
void *qpc;
inlen = MLX5_ST_SZ_BYTES(create_qp_in);
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -290,6 +308,7 @@ static const struct mlx5e_profile mlx5i_nic_profile = {
.disable = NULL, /* mlx5i_disable */
.update_stats = NULL, /* mlx5i_update_stats */
.max_nch = mlx5e_get_max_num_channels,
+ .update_carrier = NULL, /* no HW update in IB link */
.rx_handlers.handle_rx_cqe = mlx5i_handle_rx_cqe,
.rx_handlers.handle_rx_cqe_mpwqe = NULL, /* Not supported */
.max_tc = MLX5I_MAX_NUM_TC,
@@ -297,6 +316,35 @@ static const struct mlx5e_profile mlx5i_nic_profile = {
/* mlx5i netdev NDOs */
+static int mlx5i_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct mlx5e_priv *priv = mlx5i_epriv(netdev);
+ struct mlx5e_channels new_channels = {};
+ int curr_mtu;
+ int err = 0;
+
+ mutex_lock(&priv->state_lock);
+
+ curr_mtu = netdev->mtu;
+ netdev->mtu = new_mtu;
+
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+ goto out;
+
+ new_channels.params = priv->channels.params;
+ err = mlx5e_open_channels(priv, &new_channels);
+ if (err) {
+ netdev->mtu = curr_mtu;
+ goto out;
+ }
+
+ mlx5e_switch_priv_channels(priv, &new_channels, NULL);
+
+out:
+ mutex_unlock(&priv->state_lock);
+ return err;
+}
+
static int mlx5i_dev_init(struct net_device *dev)
{
struct mlx5e_priv *priv = mlx5i_epriv(dev);
@@ -310,6 +358,20 @@ static int mlx5i_dev_init(struct net_device *dev)
return 0;
}
+static int mlx5i_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct mlx5e_priv *priv = mlx5i_epriv(dev);
+
+ switch (cmd) {
+ case SIOCSHWTSTAMP:
+ return mlx5e_hwstamp_set(priv, ifr);
+ case SIOCGHWTSTAMP:
+ return mlx5e_hwstamp_get(priv, ifr);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static void mlx5i_dev_cleanup(struct net_device *dev)
{
struct mlx5e_priv *priv = mlx5i_epriv(dev);
@@ -336,6 +398,8 @@ static int mlx5i_open(struct net_device *netdev)
mlx5e_refresh_tirs(priv, false);
mlx5e_activate_priv_channels(priv);
+ mlx5e_timestamp_init(priv);
+
mutex_unlock(&priv->state_lock);
return 0;
@@ -359,6 +423,7 @@ static int mlx5i_close(struct net_device *netdev)
clear_bit(MLX5E_STATE_OPENED, &priv->state);
+ mlx5e_timestamp_cleanup(priv);
netif_carrier_off(priv->netdev);
mlx5e_deactivate_priv_channels(priv);
mlx5e_close_channels(&priv->channels);
@@ -510,4 +575,3 @@ void mlx5_rdma_netdev_free(struct net_device *netdev)
mlx5e_destroy_mdev_resources(priv->mdev);
}
EXPORT_SYMBOL(mlx5_rdma_netdev_free);
-
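The mlx5i_change_mtu() handler above follows the make-before-break pattern used elsewhere in mlx5e; in outline (a simplified sketch, not patch content):

/* Simplified sketch of the make-before-break channel switch. */
static int example_mtu_switch(struct mlx5e_priv *priv)
{
	struct mlx5e_channels new_channels = {};
	int err;

	new_channels.params = priv->channels.params;

	/* Open the replacement channels first, so that a failure leaves
	 * the currently running channels untouched. */
	err = mlx5e_open_channels(priv, &new_channels);
	if (err)
		return err;

	/* Only then divert traffic and tear down the old channels. */
	mlx5e_switch_priv_channels(priv, &new_channels, NULL);
	return 0;
}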
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib.h b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
index 213191a78464..a0f405f520f7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
@@ -38,6 +38,13 @@
#define MLX5I_MAX_NUM_TC 1
+extern const struct ethtool_ops mlx5i_ethtool_ops;
+
+#define MLX5_IB_GRH_BYTES 40
+#define MLX5_IPOIB_ENCAP_LEN 4
+#define MLX5_IPOIB_PSEUDO_LEN 20
+#define MLX5_IPOIB_HARD_LEN (MLX5_IPOIB_PSEUDO_LEN + MLX5_IPOIB_ENCAP_LEN)
+
/* ipoib rdma netdev's private data structure */
struct mlx5i_priv {
struct rdma_netdev rn; /* keep this first */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
index b5d5519542e8..a3a836bdcfd2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
@@ -61,6 +61,11 @@ struct mlx5_lag {
struct lag_tracker tracker;
struct delayed_work bond_work;
struct notifier_block nb;
+
+	/* Admin state. LAG is allowed only when this is true,
+	 * even if the network conditions for LAG are met.
+	 */
+ bool allowed;
};
/* General purpose, use for short periods of time.
@@ -214,6 +219,7 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
struct lag_tracker tracker;
u8 v2p_port1, v2p_port2;
int i, err;
+ bool do_bond;
if (!dev0 || !dev1)
return;
@@ -222,13 +228,9 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
tracker = ldev->tracker;
mutex_unlock(&lag_mutex);
- if (tracker.is_bonded && !mlx5_lag_is_bonded(ldev)) {
- if (mlx5_sriov_is_enabled(dev0) ||
- mlx5_sriov_is_enabled(dev1)) {
- mlx5_core_warn(dev0, "LAG is not supported with SRIOV");
- return;
- }
+ do_bond = tracker.is_bonded && ldev->allowed;
+ if (do_bond && !mlx5_lag_is_bonded(ldev)) {
for (i = 0; i < MLX5_MAX_PORTS; i++)
mlx5_remove_dev_by_protocol(ldev->pf[i].dev,
MLX5_INTERFACE_PROTOCOL_IB);
@@ -237,7 +239,7 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
mlx5_add_dev_by_protocol(dev0, MLX5_INTERFACE_PROTOCOL_IB);
mlx5_nic_vport_enable_roce(dev1);
- } else if (tracker.is_bonded && mlx5_lag_is_bonded(ldev)) {
+ } else if (do_bond && mlx5_lag_is_bonded(ldev)) {
mlx5_infer_tx_affinity_mapping(&tracker, &v2p_port1,
&v2p_port2);
@@ -252,7 +254,7 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
"Failed to modify LAG (%d)\n",
err);
}
- } else if (!tracker.is_bonded && mlx5_lag_is_bonded(ldev)) {
+ } else if (!do_bond && mlx5_lag_is_bonded(ldev)) {
mlx5_remove_dev_by_protocol(dev0, MLX5_INTERFACE_PROTOCOL_IB);
mlx5_nic_vport_disable_roce(dev1);
@@ -411,6 +413,15 @@ static int mlx5_lag_netdev_event(struct notifier_block *this,
return NOTIFY_DONE;
}
+static bool mlx5_lag_check_prereq(struct mlx5_lag *ldev)
+{
+ if ((ldev->pf[0].dev && mlx5_sriov_is_enabled(ldev->pf[0].dev)) ||
+ (ldev->pf[1].dev && mlx5_sriov_is_enabled(ldev->pf[1].dev)))
+ return false;
+ else
+ return true;
+}
+
static struct mlx5_lag *mlx5_lag_dev_alloc(void)
{
struct mlx5_lag *ldev;
@@ -420,6 +431,7 @@ static struct mlx5_lag *mlx5_lag_dev_alloc(void)
return NULL;
INIT_DELAYED_WORK(&ldev->bond_work, mlx5_do_bond_work);
+ ldev->allowed = mlx5_lag_check_prereq(ldev);
return ldev;
}
@@ -444,7 +456,9 @@ static void mlx5_lag_dev_add_pf(struct mlx5_lag *ldev,
ldev->tracker.netdev_state[fn].link_up = 0;
ldev->tracker.netdev_state[fn].tx_enabled = 0;
+ ldev->allowed = mlx5_lag_check_prereq(ldev);
dev->priv.lag = ldev;
+
mutex_unlock(&lag_mutex);
}
@@ -464,10 +478,10 @@ static void mlx5_lag_dev_remove_pf(struct mlx5_lag *ldev,
memset(&ldev->pf[i], 0, sizeof(*ldev->pf));
dev->priv.lag = NULL;
+ ldev->allowed = mlx5_lag_check_prereq(ldev);
mutex_unlock(&lag_mutex);
}
-
/* Must be called with intf_mutex held */
void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev)
{
@@ -543,6 +557,44 @@ bool mlx5_lag_is_active(struct mlx5_core_dev *dev)
}
EXPORT_SYMBOL(mlx5_lag_is_active);
+static int mlx5_lag_set_state(struct mlx5_core_dev *dev, bool allow)
+{
+ struct mlx5_lag *ldev;
+ int ret = 0;
+ bool lag_active;
+
+ mlx5_dev_list_lock();
+
+ ldev = mlx5_lag_dev_get(dev);
+ if (!ldev) {
+ ret = -ENODEV;
+ goto unlock;
+ }
+ lag_active = mlx5_lag_is_bonded(ldev);
+ if (!mlx5_lag_check_prereq(ldev) && allow) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+ if (ldev->allowed == allow)
+ goto unlock;
+ ldev->allowed = allow;
+ if ((lag_active && !allow) || allow)
+ mlx5_do_bond(ldev);
+unlock:
+ mlx5_dev_list_unlock();
+ return ret;
+}
+
+int mlx5_lag_forbid(struct mlx5_core_dev *dev)
+{
+ return mlx5_lag_set_state(dev, false);
+}
+
+int mlx5_lag_allow(struct mlx5_core_dev *dev)
+{
+ return mlx5_lag_set_state(dev, true);
+}
+
struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev)
{
struct net_device *ndev = NULL;
@@ -586,4 +638,3 @@ bool mlx5_lag_intf_add(struct mlx5_interface *intf, struct mlx5_priv *priv)
/* If bonded, we do not add an IB device for PF1. */
return false;
}
-
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/gid.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/gid.c
new file mode 100644
index 000000000000..de2aed44ab85
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/gid.c
@@ -0,0 +1,154 @@
+/*
+ * Copyright (c) 2017, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/mlx5/driver.h>
+#include <linux/etherdevice.h>
+#include <linux/idr.h>
+#include "mlx5_core.h"
+
+void mlx5_init_reserved_gids(struct mlx5_core_dev *dev)
+{
+ unsigned int tblsz = MLX5_CAP_ROCE(dev, roce_address_table_size);
+
+ ida_init(&dev->roce.reserved_gids.ida);
+ dev->roce.reserved_gids.start = tblsz;
+ dev->roce.reserved_gids.count = 0;
+}
+
+void mlx5_cleanup_reserved_gids(struct mlx5_core_dev *dev)
+{
+ WARN_ON(!ida_is_empty(&dev->roce.reserved_gids.ida));
+ dev->roce.reserved_gids.start = 0;
+ dev->roce.reserved_gids.count = 0;
+ ida_destroy(&dev->roce.reserved_gids.ida);
+}
+
+int mlx5_core_reserve_gids(struct mlx5_core_dev *dev, unsigned int count)
+{
+ if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
+ mlx5_core_err(dev, "Cannot reserve GIDs when interfaces are up\n");
+ return -EPERM;
+ }
+ if (dev->roce.reserved_gids.start < count) {
+ mlx5_core_warn(dev, "GID table exhausted attempting to reserve %d more GIDs\n",
+ count);
+ return -ENOMEM;
+ }
+ if (dev->roce.reserved_gids.count + count > MLX5_MAX_RESERVED_GIDS) {
+ mlx5_core_warn(dev, "Unable to reserve %d more GIDs\n", count);
+ return -ENOMEM;
+ }
+
+ dev->roce.reserved_gids.start -= count;
+ dev->roce.reserved_gids.count += count;
+ mlx5_core_dbg(dev, "Reserved %u GIDs starting at %u\n",
+ dev->roce.reserved_gids.count,
+ dev->roce.reserved_gids.start);
+ return 0;
+}
+
+void mlx5_core_unreserve_gids(struct mlx5_core_dev *dev, unsigned int count)
+{
+ WARN(test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state), "Unreserving GIDs when interfaces are up");
+ WARN(count > dev->roce.reserved_gids.count, "Unreserving %u GIDs when only %u reserved",
+ count, dev->roce.reserved_gids.count);
+
+ dev->roce.reserved_gids.start += count;
+ dev->roce.reserved_gids.count -= count;
+ mlx5_core_dbg(dev, "%u GIDs starting at %u left reserved\n",
+ dev->roce.reserved_gids.count,
+ dev->roce.reserved_gids.start);
+}
+
+int mlx5_core_reserved_gid_alloc(struct mlx5_core_dev *dev, int *gid_index)
+{
+ int end = dev->roce.reserved_gids.start +
+ dev->roce.reserved_gids.count;
+ int index = 0;
+
+ index = ida_simple_get(&dev->roce.reserved_gids.ida,
+ dev->roce.reserved_gids.start, end,
+ GFP_KERNEL);
+ if (index < 0)
+ return index;
+
+ mlx5_core_dbg(dev, "Allocating reserved GID %u\n", index);
+ *gid_index = index;
+ return 0;
+}
+
+void mlx5_core_reserved_gid_free(struct mlx5_core_dev *dev, int gid_index)
+{
+ mlx5_core_dbg(dev, "Freeing reserved GID %u\n", gid_index);
+ ida_simple_remove(&dev->roce.reserved_gids.ida, gid_index);
+}
+
+unsigned int mlx5_core_reserved_gids_count(struct mlx5_core_dev *dev)
+{
+ return dev->roce.reserved_gids.count;
+}
+EXPORT_SYMBOL_GPL(mlx5_core_reserved_gids_count);
+
+int mlx5_core_roce_gid_set(struct mlx5_core_dev *dev, unsigned int index,
+ u8 roce_version, u8 roce_l3_type, const u8 *gid,
+ const u8 *mac, bool vlan, u16 vlan_id)
+{
+#define MLX5_SET_RA(p, f, v) MLX5_SET(roce_addr_layout, p, f, v)
+ u32 in[MLX5_ST_SZ_DW(set_roce_address_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(set_roce_address_out)] = {0};
+ void *in_addr = MLX5_ADDR_OF(set_roce_address_in, in, roce_address);
+ char *addr_l3_addr = MLX5_ADDR_OF(roce_addr_layout, in_addr,
+ source_l3_address);
+ void *addr_mac = MLX5_ADDR_OF(roce_addr_layout, in_addr,
+ source_mac_47_32);
+ int gidsz = MLX5_FLD_SZ_BYTES(roce_addr_layout, source_l3_address);
+
+ if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
+ return -EINVAL;
+
+ if (gid) {
+ if (vlan) {
+ MLX5_SET_RA(in_addr, vlan_valid, 1);
+ MLX5_SET_RA(in_addr, vlan_id, vlan_id);
+ }
+
+ ether_addr_copy(addr_mac, mac);
+ MLX5_SET_RA(in_addr, roce_version, roce_version);
+ MLX5_SET_RA(in_addr, roce_l3_type, roce_l3_type);
+ memcpy(addr_l3_addr, gid, gidsz);
+ }
+
+ MLX5_SET(set_roce_address_in, in, roce_address_index, index);
+ MLX5_SET(set_roce_address_in, in, opcode, MLX5_CMD_OP_SET_ROCE_ADDRESS);
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+EXPORT_SYMBOL(mlx5_core_roce_gid_set);
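Taken together these helpers define a reserve/alloc/program/free lifecycle; a hypothetical sketch (the GID, MAC, and RoCE version/l3-type values are placeholders):

/* Hypothetical caller -- a sketch of the reserved-GID lifecycle. */
static int example_reserved_gid(struct mlx5_core_dev *dev,
				const u8 *gid, const u8 *mac)
{
	int index, err;

	/* Reserve one entry off the top of the RoCE GID table; only
	 * allowed while all interfaces are down. */
	err = mlx5_core_reserve_gids(dev, 1);
	if (err)
		return err;

	err = mlx5_core_reserved_gid_alloc(dev, &index);
	if (err)
		goto unreserve;

	/* roce_version/roce_l3_type values here are placeholders. */
	err = mlx5_core_roce_gid_set(dev, index, 2, 0, gid, mac,
				     false, 0);
	if (err)
		mlx5_core_reserved_gid_free(dev, index);

unreserve:
	if (err)
		mlx5_core_unreserve_gids(dev, 1);
	return err;
}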
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
new file mode 100644
index 000000000000..7550b1cc8c6a
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2017, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __LIB_MLX5_H__
+#define __LIB_MLX5_H__
+
+void mlx5_init_reserved_gids(struct mlx5_core_dev *dev);
+void mlx5_cleanup_reserved_gids(struct mlx5_core_dev *dev);
+int mlx5_core_reserve_gids(struct mlx5_core_dev *dev, unsigned int count);
+void mlx5_core_unreserve_gids(struct mlx5_core_dev *dev, unsigned int count);
+int mlx5_core_reserved_gid_alloc(struct mlx5_core_dev *dev, int *gid_index);
+void mlx5_core_reserved_gid_free(struct mlx5_core_dev *dev, int gid_index);
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 13be264587f1..c065132b956d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -56,6 +56,9 @@
#ifdef CONFIG_MLX5_CORE_EN
#include "eswitch.h"
#endif
+#include "lib/mlx5.h"
+#include "fpga/core.h"
+#include "accel/ipsec.h"
MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Connect-IB, ConnectX-4 core driver");
@@ -356,12 +359,11 @@ static void mlx5_disable_msix(struct mlx5_core_dev *dev)
kfree(priv->msix_arr);
}
-struct mlx5_reg_host_endianess {
+struct mlx5_reg_host_endianness {
u8 he;
u8 rsvd[15];
};
-
#define CAP_MASK(pos, size) ((u64)((1 << (size)) - 1) << (pos))
enum {
@@ -475,7 +477,7 @@ static int handle_hca_cap_atomic(struct mlx5_core_dev *dev)
req_endianness =
MLX5_CAP_ATOMIC(dev,
- supported_atomic_req_8B_endianess_mode_1);
+ supported_atomic_req_8B_endianness_mode_1);
if (req_endianness != MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS)
return 0;
@@ -487,7 +489,7 @@ static int handle_hca_cap_atomic(struct mlx5_core_dev *dev)
set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability);
/* Set requestor to host endianness */
- MLX5_SET(atomic_caps, set_hca_cap, atomic_req_8B_endianess_mode,
+ MLX5_SET(atomic_caps, set_hca_cap, atomic_req_8B_endianness_mode,
MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS);
err = set_caps(dev, set_ctx, set_sz, MLX5_SET_HCA_CAP_OP_MOD_ATOMIC);
@@ -562,8 +564,8 @@ query_ex:
static int set_hca_ctrl(struct mlx5_core_dev *dev)
{
- struct mlx5_reg_host_endianess he_in;
- struct mlx5_reg_host_endianess he_out;
+ struct mlx5_reg_host_endianness he_in;
+ struct mlx5_reg_host_endianness he_out;
int err;
if (!mlx5_core_is_pf(dev))
@@ -936,6 +938,8 @@ static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
mlx5_init_mkey_table(dev);
+ mlx5_init_reserved_gids(dev);
+
err = mlx5_init_rl_table(dev);
if (err) {
dev_err(&pdev->dev, "Failed to init rate limiting\n");
@@ -956,8 +960,16 @@ static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
goto err_eswitch_cleanup;
}
+ err = mlx5_fpga_init(dev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to init fpga device %d\n", err);
+ goto err_sriov_cleanup;
+ }
+
return 0;
+err_sriov_cleanup:
+ mlx5_sriov_cleanup(dev);
err_eswitch_cleanup:
#ifdef CONFIG_MLX5_CORE_EN
mlx5_eswitch_cleanup(dev->priv.eswitch);
@@ -981,11 +993,13 @@ out:
static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
{
+ mlx5_fpga_cleanup(dev);
mlx5_sriov_cleanup(dev);
#ifdef CONFIG_MLX5_CORE_EN
mlx5_eswitch_cleanup(dev->priv.eswitch);
#endif
mlx5_cleanup_rl_table(dev);
+ mlx5_cleanup_reserved_gids(dev);
mlx5_cleanup_mkey_table(dev);
mlx5_cleanup_srq_table(dev);
mlx5_cleanup_qp_table(dev);
@@ -1020,7 +1034,7 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
if (err) {
dev_err(&dev->pdev->dev, "Firmware over %d MS in pre-initializing state, aborting\n",
FW_PRE_INIT_TIMEOUT_MILI);
- goto out;
+ goto out_err;
}
err = mlx5_cmd_init(dev);
@@ -1151,6 +1165,17 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
goto err_sriov;
}
+ err = mlx5_fpga_device_start(dev);
+ if (err) {
+ dev_err(&pdev->dev, "fpga device start failed %d\n", err);
+ goto err_fpga_start;
+ }
+ err = mlx5_accel_ipsec_init(dev);
+ if (err) {
+ dev_err(&pdev->dev, "IPSec device start failed %d\n", err);
+ goto err_ipsec_start;
+ }
+
if (mlx5_device_registered(dev)) {
mlx5_attach_device(dev);
} else {
@@ -1169,6 +1194,11 @@ out:
return 0;
err_reg_dev:
+ mlx5_accel_ipsec_cleanup(dev);
+err_ipsec_start:
+ mlx5_fpga_device_stop(dev);
+
+err_fpga_start:
mlx5_sriov_detach(dev);
err_sriov:
@@ -1228,7 +1258,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
int err = 0;
if (cleanup)
- mlx5_drain_health_wq(dev);
+ mlx5_drain_health_recovery(dev);
mutex_lock(&dev->intf_state_mutex);
if (test_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state)) {
@@ -1239,9 +1269,15 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
goto out;
}
+ clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
+ set_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state);
+
if (mlx5_device_registered(dev))
mlx5_detach_device(dev);
+ mlx5_accel_ipsec_cleanup(dev);
+ mlx5_fpga_device_stop(dev);
+
mlx5_sriov_detach(dev);
#ifdef CONFIG_MLX5_CORE_EN
mlx5_eswitch_detach(dev->priv.eswitch);
@@ -1266,8 +1302,6 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
mlx5_cmd_cleanup(dev);
out:
- clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
- set_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state);
mutex_unlock(&dev->intf_state_mutex);
return err;
}
@@ -1412,7 +1446,7 @@ static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
dev_info(&pdev->dev, "%s was called\n", __func__);
- mlx5_enter_error_state(dev);
+ mlx5_enter_error_state(dev, false);
mlx5_unload_one(dev, priv, false);
/* In case of kernel call drain the health wq */
if (state) {
@@ -1499,24 +1533,52 @@ static const struct pci_error_handlers mlx5_err_handler = {
.resume = mlx5_pci_resume
};
+static int mlx5_try_fast_unload(struct mlx5_core_dev *dev)
+{
+ int ret;
+
+ if (!MLX5_CAP_GEN(dev, force_teardown)) {
+ mlx5_core_dbg(dev, "force teardown is not supported in the firmware\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
+ mlx5_core_dbg(dev, "Device in internal error state, giving up\n");
+ return -EAGAIN;
+ }
+
+ ret = mlx5_cmd_force_teardown_hca(dev);
+ if (ret) {
+ mlx5_core_dbg(dev, "Firmware couldn't do fast unload error: %d\n", ret);
+ return ret;
+ }
+
+ mlx5_enter_error_state(dev, true);
+
+ return 0;
+}
+
static void shutdown(struct pci_dev *pdev)
{
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
struct mlx5_priv *priv = &dev->priv;
+ int err;
dev_info(&pdev->dev, "Shutdown was called\n");
/* Notify mlx5 clients that the kernel is being shut down */
set_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &dev->intf_state);
- mlx5_unload_one(dev, priv, false);
+ err = mlx5_try_fast_unload(dev);
+ if (err)
+ mlx5_unload_one(dev, priv, false);
mlx5_pci_disable_device(dev);
}
static const struct pci_device_id mlx5_core_pci_table[] = {
- { PCI_VDEVICE(MELLANOX, 0x1011) }, /* Connect-IB */
+ { PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_CONNECTIB) },
{ PCI_VDEVICE(MELLANOX, 0x1012), MLX5_PCI_DEV_IS_VF}, /* Connect-IB VF */
- { PCI_VDEVICE(MELLANOX, 0x1013) }, /* ConnectX-4 */
+ { PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_CONNECTX4) },
{ PCI_VDEVICE(MELLANOX, 0x1014), MLX5_PCI_DEV_IS_VF}, /* ConnectX-4 VF */
- { PCI_VDEVICE(MELLANOX, 0x1015) }, /* ConnectX-4LX */
+ { PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_CONNECTX4_LX) },
{ PCI_VDEVICE(MELLANOX, 0x1016), MLX5_PCI_DEV_IS_VF}, /* ConnectX-4LX VF */
{ PCI_VDEVICE(MELLANOX, 0x1017) }, /* ConnectX-5, PCIe 3.0 */
{ PCI_VDEVICE(MELLANOX, 0x1018), MLX5_PCI_DEV_IS_VF}, /* ConnectX-5 VF */
@@ -1524,6 +1586,8 @@ static const struct pci_device_id mlx5_core_pci_table[] = {
{ PCI_VDEVICE(MELLANOX, 0x101a), MLX5_PCI_DEV_IS_VF}, /* ConnectX-5 Ex VF */
{ PCI_VDEVICE(MELLANOX, 0x101b) }, /* ConnectX-6 */
{ PCI_VDEVICE(MELLANOX, 0x101c), MLX5_PCI_DEV_IS_VF}, /* ConnectX-6 VF */
+ { PCI_VDEVICE(MELLANOX, 0xa2d2) }, /* BlueField integrated ConnectX-5 network controller */
+ { PCI_VDEVICE(MELLANOX, 0xa2d3), MLX5_PCI_DEV_IS_VF}, /* BlueField integrated ConnectX-5 network controller VF */
{ 0, }
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index fbc6e9e9e305..6a3d6bef7dd4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -37,10 +37,10 @@
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/if_link.h>
+#include <linux/firmware.h>
#define DRIVER_NAME "mlx5_core"
-#define DRIVER_VERSION "3.0-1"
-#define DRIVER_RELDATE "January 2015"
+#define DRIVER_VERSION "5.0-0"
#define MLX5_TOTAL_VPORTS(mdev) (1 + pci_sriov_get_totalvfs(mdev->pdev))
@@ -84,12 +84,13 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev);
int mlx5_query_board_id(struct mlx5_core_dev *dev);
int mlx5_cmd_init_hca(struct mlx5_core_dev *dev);
int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev);
+int mlx5_cmd_force_teardown_hca(struct mlx5_core_dev *dev);
void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
unsigned long param);
void mlx5_core_page_fault(struct mlx5_core_dev *dev,
struct mlx5_pagefault *pfault);
void mlx5_port_module_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe);
-void mlx5_enter_error_state(struct mlx5_core_dev *dev);
+void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force);
void mlx5_disable_device(struct mlx5_core_dev *dev);
void mlx5_recover_device(struct mlx5_core_dev *dev);
int mlx5_sriov_init(struct mlx5_core_dev *dev);
@@ -153,6 +154,8 @@ int mlx5_set_mtpps(struct mlx5_core_dev *mdev, u32 *mtpps, u32 mtpps_size);
int mlx5_query_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 *arm, u8 *mode);
int mlx5_set_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 arm, u8 mode);
+int mlx5_firmware_flash(struct mlx5_core_dev *dev, const struct firmware *fw);
+
void mlx5e_init(void);
void mlx5e_cleanup(void);
@@ -168,4 +171,7 @@ static inline int mlx5_lag_is_lacp_owner(struct mlx5_core_dev *dev)
MLX5_CAP_GEN(dev, lag_master);
}
+int mlx5_lag_allow(struct mlx5_core_dev *dev);
+int mlx5_lag_forbid(struct mlx5_core_dev *dev);
+
#endif /* __MLX5_CORE_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index a57d5a81eb05..e36d3e3675f9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -279,7 +279,7 @@ static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
int i;
inlen += npages * MLX5_FLD_SZ_BYTES(manage_pages_in, pas[0]);
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in) {
err = -ENOMEM;
mlx5_core_warn(dev, "vzalloc failed %d\n", inlen);
@@ -376,7 +376,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
*nclaimed = 0;
outlen += npages * MLX5_FLD_SZ_BYTES(manage_pages_out, pas[0]);
- out = mlx5_vzalloc(outlen);
+ out = kvzalloc(outlen, GFP_KERNEL);
if (!out)
return -ENOMEM;
@@ -403,7 +403,6 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
for (i = 0; i < num_claimed; i++)
free_4k(dev, MLX5_GET64(manage_pages_out, out, pas[i]));
-
if (nclaimed)
*nclaimed = num_claimed;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index 141583daf5a2..1975d4388d4f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -47,8 +47,8 @@ int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
u32 *in = NULL;
void *data;
- in = mlx5_vzalloc(inlen);
- out = mlx5_vzalloc(outlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
+ out = kvzalloc(outlen, GFP_KERNEL);
if (!in || !out)
goto out;
@@ -454,7 +454,7 @@ int mlx5_core_query_ib_ppcnt(struct mlx5_core_dev *dev,
u32 *in;
int err;
- in = mlx5_vzalloc(sz);
+ in = kvzalloc(sz, GFP_KERNEL);
if (!in) {
err = -ENOMEM;
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
index cbbcef2884be..340f281c9801 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
@@ -30,7 +30,6 @@
* SOFTWARE.
*/
-
#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/mlx5/cmd.h>
@@ -519,23 +518,3 @@ int mlx5_core_query_q_counter(struct mlx5_core_dev *dev, u16 counter_id,
return mlx5_cmd_exec(dev, in, sizeof(in), out, out_size);
}
EXPORT_SYMBOL_GPL(mlx5_core_query_q_counter);
-
-int mlx5_core_query_out_of_buffer(struct mlx5_core_dev *dev, u16 counter_id,
- u32 *out_of_buffer)
-{
- int outlen = MLX5_ST_SZ_BYTES(query_q_counter_out);
- void *out;
- int err;
-
- out = mlx5_vzalloc(outlen);
- if (!out)
- return -ENOMEM;
-
- err = mlx5_core_query_q_counter(dev, counter_id, 0, out, outlen);
- if (!err)
- *out_of_buffer = MLX5_GET(query_q_counter_out, out,
- out_of_buffer);
-
- kfree(out);
- return err;
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
index e08627785590..bcdf7779c48d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
@@ -175,15 +175,20 @@ int mlx5_core_sriov_configure(struct pci_dev *pdev, int num_vfs)
if (!mlx5_core_is_pf(dev))
return -EPERM;
- if (num_vfs && mlx5_lag_is_active(dev)) {
- mlx5_core_warn(dev, "can't turn sriov on while LAG is active");
- return -EINVAL;
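+	/* SR-IOV and LAG are mutually exclusive: forbid LAG for as long as
+	 * any VFs are enabled, and allow it again once they are disabled.
+	 */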
+ if (num_vfs) {
+ int ret;
+
+ ret = mlx5_lag_forbid(dev);
+ if (ret && (ret != -ENODEV))
+ return ret;
}
- if (num_vfs)
+ if (num_vfs) {
err = mlx5_sriov_enable(pdev, num_vfs);
- else
+ } else {
mlx5_sriov_disable(pdev);
+ mlx5_lag_allow(dev);
+ }
return err ? err : num_vfs;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/srq.c b/drivers/net/ethernet/mellanox/mlx5/core/srq.c
index 3099630015d7..f774de6f5fcb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/srq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/srq.c
@@ -162,7 +162,7 @@ static int create_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
pas_size = get_pas_size(in);
inlen = MLX5_ST_SZ_BYTES(create_srq_in) + pas_size;
- create_in = mlx5_vzalloc(inlen);
+ create_in = kvzalloc(inlen, GFP_KERNEL);
if (!create_in)
return -ENOMEM;
@@ -221,7 +221,7 @@ static int query_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
void *srqc;
int err;
- srq_out = mlx5_vzalloc(MLX5_ST_SZ_BYTES(query_srq_out));
+ srq_out = kvzalloc(MLX5_ST_SZ_BYTES(query_srq_out), GFP_KERNEL);
if (!srq_out)
return -ENOMEM;
@@ -256,7 +256,7 @@ static int create_xrc_srq_cmd(struct mlx5_core_dev *dev,
pas_size = get_pas_size(in);
inlen = MLX5_ST_SZ_BYTES(create_xrc_srq_in) + pas_size;
- create_in = mlx5_vzalloc(inlen);
+ create_in = kvzalloc(inlen, GFP_KERNEL);
if (!create_in)
return -ENOMEM;
@@ -320,7 +320,7 @@ static int query_xrc_srq_cmd(struct mlx5_core_dev *dev,
void *xrc_srqc;
int err;
- xrcsrq_out = mlx5_vzalloc(MLX5_ST_SZ_BYTES(query_xrc_srq_out));
+ xrcsrq_out = kvzalloc(MLX5_ST_SZ_BYTES(query_xrc_srq_out), GFP_KERNEL);
if (!xrcsrq_out)
return -ENOMEM;
memset(xrcsrq_in, 0, sizeof(xrcsrq_in));
@@ -357,7 +357,7 @@ static int create_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
pas_size = get_pas_size(in);
inlen = MLX5_ST_SZ_BYTES(create_rmp_in) + pas_size;
- create_in = mlx5_vzalloc(inlen);
+ create_in = kvzalloc(inlen, GFP_KERNEL);
if (!create_in)
return -ENOMEM;
@@ -390,7 +390,7 @@ static int arm_rmp_cmd(struct mlx5_core_dev *dev,
void *bitmask;
int err;
- in = mlx5_vzalloc(MLX5_ST_SZ_BYTES(modify_rmp_in));
+ in = kvzalloc(MLX5_ST_SZ_BYTES(modify_rmp_in), GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -417,7 +417,7 @@ static int query_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
void *rmpc;
int err;
- rmp_out = mlx5_vzalloc(MLX5_ST_SZ_BYTES(query_rmp_out));
+ rmp_out = kvzalloc(MLX5_ST_SZ_BYTES(query_rmp_out), GFP_KERNEL);
if (!rmp_out)
return -ENOMEM;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
index a00ff49eec18..5e128d7a9ffd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
@@ -284,7 +284,7 @@ int mlx5_core_arm_rmp(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm)
void *bitmask;
int err;
- in = mlx5_vzalloc(MLX5_ST_SZ_BYTES(modify_rmp_in));
+ in = kvzalloc(MLX5_ST_SZ_BYTES(modify_rmp_in), GFP_KERNEL);
if (!in)
return -ENOMEM;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index 15c2294dd2b4..5abfec1c3399 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -172,7 +172,7 @@ int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
u8 *out_addr;
int err;
- out = mlx5_vzalloc(outlen);
+ out = kvzalloc(outlen, GFP_KERNEL);
if (!out)
return -ENOMEM;
@@ -197,11 +197,9 @@ int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *mdev,
void *nic_vport_ctx;
u8 *perm_mac;
- in = mlx5_vzalloc(inlen);
- if (!in) {
- mlx5_core_warn(mdev, "failed to allocate inbox\n");
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
return -ENOMEM;
- }
MLX5_SET(modify_nic_vport_context_in, in,
field_select.permanent_address, 1);
@@ -231,7 +229,7 @@ int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu)
u32 *out;
int err;
- out = mlx5_vzalloc(outlen);
+ out = kvzalloc(outlen, GFP_KERNEL);
if (!out)
return -ENOMEM;
@@ -251,7 +249,7 @@ int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu)
void *in;
int err;
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -501,7 +499,7 @@ int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
u32 *out;
int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
- out = mlx5_vzalloc(outlen);
+ out = kvzalloc(outlen, GFP_KERNEL);
if (!out)
return -ENOMEM;
@@ -521,7 +519,7 @@ int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
u32 *out;
int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
- out = mlx5_vzalloc(outlen);
+ out = kvzalloc(outlen, GFP_KERNEL);
if (!out)
return -ENOMEM;
@@ -551,7 +549,7 @@ int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
if (!MLX5_CAP_ESW(mdev, nic_vport_node_guid_modify))
return -EOPNOTSUPP;
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -577,7 +575,7 @@ int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
u32 *out;
int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
- out = mlx5_vzalloc(outlen);
+ out = kvzalloc(outlen, GFP_KERNEL);
if (!out)
return -ENOMEM;
@@ -879,11 +877,9 @@ int mlx5_modify_nic_vport_promisc(struct mlx5_core_dev *mdev,
int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
int err;
- in = mlx5_vzalloc(inlen);
- if (!in) {
- mlx5_core_err(mdev, "failed to allocate inbox\n");
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
return -ENOMEM;
- }
MLX5_SET(modify_nic_vport_context_in, in, field_select.promisc, 1);
MLX5_SET(modify_nic_vport_context_in, in,
@@ -913,11 +909,9 @@ static int mlx5_nic_vport_update_roce_state(struct mlx5_core_dev *mdev,
int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
int err;
- in = mlx5_vzalloc(inlen);
- if (!in) {
- mlx5_core_warn(mdev, "failed to allocate inbox\n");
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
return -ENOMEM;
- }
MLX5_SET(modify_nic_vport_context_in, in, field_select.roce_en, 1);
MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.roce_en,
@@ -932,12 +926,16 @@ static int mlx5_nic_vport_update_roce_state(struct mlx5_core_dev *mdev,
int mlx5_nic_vport_enable_roce(struct mlx5_core_dev *mdev)
{
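+	/* RoCE enable/disable is refcounted; the vport context is only
+	 * modified on the first enable and the last disable.
+	 */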
+ if (atomic_inc_return(&mdev->roce.roce_en) != 1)
+ return 0;
return mlx5_nic_vport_update_roce_state(mdev, MLX5_VPORT_ROCE_ENABLED);
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_enable_roce);
int mlx5_nic_vport_disable_roce(struct mlx5_core_dev *mdev)
{
+ if (atomic_dec_return(&mdev->roce.roce_en) != 0)
+ return 0;
return mlx5_nic_vport_update_roce_state(mdev, MLX5_VPORT_ROCE_DISABLED);
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_disable_roce);
@@ -952,7 +950,7 @@ int mlx5_core_query_vport_counter(struct mlx5_core_dev *dev, u8 other_vport,
int err;
is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
- in = mlx5_vzalloc(in_sz);
+ in = kvzalloc(in_sz, GFP_KERNEL);
if (!in) {
err = -ENOMEM;
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
index 921673c42bc9..6bcfc25350f5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
@@ -54,6 +54,12 @@ static u32 mlx5_wq_cyc_get_byte_size(struct mlx5_wq_cyc *wq)
return mlx5_wq_cyc_get_size(wq) << wq->log_stride;
}
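+/* A QP work queue is one contiguous buffer holding the RQ followed by
+ * the SQ; see mlx5_wq_qp_create() below.
+ */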
+static u32 mlx5_wq_qp_get_byte_size(struct mlx5_wq_qp *wq)
+{
+ return mlx5_wq_cyc_get_byte_size(&wq->rq) +
+ mlx5_wq_cyc_get_byte_size(&wq->sq);
+}
+
static u32 mlx5_cqwq_get_byte_size(struct mlx5_cqwq *wq)
{
return mlx5_cqwq_get_size(wq) << wq->log_stride;
@@ -99,6 +105,46 @@ err_db_free:
return err;
}
+int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
+ void *qpc, struct mlx5_wq_qp *wq,
+ struct mlx5_wq_ctrl *wq_ctrl)
+{
+ int err;
+
+ wq->rq.log_stride = MLX5_GET(qpc, qpc, log_rq_stride) + 4;
+ wq->rq.sz_m1 = (1 << MLX5_GET(qpc, qpc, log_rq_size)) - 1;
+
+ wq->sq.log_stride = ilog2(MLX5_SEND_WQE_BB);
+ wq->sq.sz_m1 = (1 << MLX5_GET(qpc, qpc, log_sq_size)) - 1;
+
+ err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
+ if (err) {
+ mlx5_core_warn(mdev, "mlx5_db_alloc_node() failed, %d\n", err);
+ return err;
+ }
+
+ err = mlx5_buf_alloc_node(mdev, mlx5_wq_qp_get_byte_size(wq),
+ &wq_ctrl->buf, param->buf_numa_node);
+ if (err) {
+ mlx5_core_warn(mdev, "mlx5_buf_alloc_node() failed, %d\n", err);
+ goto err_db_free;
+ }
+
+ wq->rq.buf = wq_ctrl->buf.direct.buf;
+ wq->sq.buf = wq->rq.buf + mlx5_wq_cyc_get_byte_size(&wq->rq);
+ wq->rq.db = &wq_ctrl->db.db[MLX5_RCV_DBR];
+ wq->sq.db = &wq_ctrl->db.db[MLX5_SND_DBR];
+
+ wq_ctrl->mdev = mdev;
+
+ return 0;
+
+err_db_free:
+ mlx5_db_free(mdev, &wq_ctrl->db);
+
+ return err;
+}
+
int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
void *cqc, struct mlx5_cqwq *wq,
struct mlx5_frag_wq_ctrl *wq_ctrl)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.h b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
index d8afed898c31..718589d0cec2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
@@ -34,6 +34,8 @@
#define __MLX5_WQ_H__
#include <linux/mlx5/mlx5_ifc.h>
+#include <linux/mlx5/cq.h>
+#include <linux/mlx5/qp.h>
struct mlx5_wq_param {
int linear;
@@ -60,6 +62,11 @@ struct mlx5_wq_cyc {
u8 log_stride;
};
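+/* RQ and SQ of a QP share one contiguous buffer, RQ first; see
+ * mlx5_wq_qp_create().
+ */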
+struct mlx5_wq_qp {
+ struct mlx5_wq_cyc rq;
+ struct mlx5_wq_cyc sq;
+};
+
struct mlx5_cqwq {
struct mlx5_frag_buf frag_buf;
__be32 *db;
@@ -87,6 +94,10 @@ int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
struct mlx5_wq_ctrl *wq_ctrl);
u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq);
+int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
+ void *qpc, struct mlx5_wq_qp *wq,
+ struct mlx5_wq_ctrl *wq_ctrl);
+
int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
void *cqc, struct mlx5_cqwq *wq,
struct mlx5_frag_wq_ctrl *wq_ctrl);
@@ -146,6 +157,22 @@ static inline void mlx5_cqwq_update_db_record(struct mlx5_cqwq *wq)
*wq->db = cpu_to_be32(wq->cc & 0xffffff);
}
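+/* Return the next CQE if hardware has posted one, or NULL otherwise. A CQE
+ * is owned by software when its ownership bit matches the parity of the
+ * consumer wrap count.
+ */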
+static inline struct mlx5_cqe64 *mlx5_cqwq_get_cqe(struct mlx5_cqwq *wq)
+{
+ u32 ci = mlx5_cqwq_get_ci(wq);
+ struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(wq, ci);
+ u8 cqe_ownership_bit = cqe->op_own & MLX5_CQE_OWNER_MASK;
+ u8 sw_ownership_val = mlx5_cqwq_get_wrap_cnt(wq) & 1;
+
+ if (cqe_ownership_bit != sw_ownership_val)
+ return NULL;
+
+ /* ensure cqe content is read after cqe ownership bit */
+ dma_rmb();
+
+ return cqe;
+}
+
static inline int mlx5_wq_ll_is_full(struct mlx5_wq_ll *wq)
{
return wq->cur_sz == wq->sz_m1;
diff --git a/drivers/net/ethernet/mellanox/mlxfw/Kconfig b/drivers/net/ethernet/mellanox/mlxfw/Kconfig
new file mode 100644
index 000000000000..186ebe783f97
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxfw/Kconfig
@@ -0,0 +1,13 @@
+#
+# Mellanox firmware flash library configuration
+#
+
+config MLXFW
+ tristate "Mellanox Technologies firmware flash module"
+ select XZ_DEC
+ ---help---
+ This driver supports Mellanox Technologies Firmware
+ flashing common logic.
+
+ To compile this driver as a module, choose M here: the
+ module will be called mlxfw.
diff --git a/drivers/net/ethernet/mellanox/mlxfw/Makefile b/drivers/net/ethernet/mellanox/mlxfw/Makefile
new file mode 100644
index 000000000000..7448b301104c
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxfw/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_MLXFW) += mlxfw.o
+mlxfw-objs := mlxfw_fsm.o mlxfw_mfa2_tlv_multi.o mlxfw_mfa2.o
diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw.h b/drivers/net/ethernet/mellanox/mlxfw/mlxfw.h
new file mode 100644
index 000000000000..7a712b6b09ec
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw.h
@@ -0,0 +1,111 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxfw/mlxfw.h
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXFW_H
+#define _MLXFW_H
+
+#include <linux/firmware.h>
+
+enum mlxfw_fsm_state {
+ MLXFW_FSM_STATE_IDLE,
+ MLXFW_FSM_STATE_LOCKED,
+ MLXFW_FSM_STATE_INITIALIZE,
+ MLXFW_FSM_STATE_DOWNLOAD,
+ MLXFW_FSM_STATE_VERIFY,
+ MLXFW_FSM_STATE_APPLY,
+ MLXFW_FSM_STATE_ACTIVATE,
+};
+
+enum mlxfw_fsm_state_err {
+ MLXFW_FSM_STATE_ERR_OK,
+ MLXFW_FSM_STATE_ERR_ERROR,
+ MLXFW_FSM_STATE_ERR_REJECTED_DIGEST_ERR,
+ MLXFW_FSM_STATE_ERR_REJECTED_NOT_APPLICABLE,
+ MLXFW_FSM_STATE_ERR_REJECTED_UNKNOWN_KEY,
+ MLXFW_FSM_STATE_ERR_REJECTED_AUTH_FAILED,
+ MLXFW_FSM_STATE_ERR_REJECTED_UNSIGNED,
+ MLXFW_FSM_STATE_ERR_REJECTED_KEY_NOT_APPLICABLE,
+ MLXFW_FSM_STATE_ERR_REJECTED_BAD_FORMAT,
+ MLXFW_FSM_STATE_ERR_BLOCKED_PENDING_RESET,
+ MLXFW_FSM_STATE_ERR_MAX,
+};
+
+struct mlxfw_dev;
+
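+/* Callbacks the driver implements on top of its device-specific register
+ * interface; they map onto the firmware FSM steps driven by
+ * mlxfw_firmware_flash().
+ */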
+struct mlxfw_dev_ops {
+ int (*component_query)(struct mlxfw_dev *mlxfw_dev, u16 component_index,
+ u32 *p_max_size, u8 *p_align_bits,
+ u16 *p_max_write_size);
+
+ int (*fsm_lock)(struct mlxfw_dev *mlxfw_dev, u32 *fwhandle);
+
+ int (*fsm_component_update)(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
+ u16 component_index, u32 component_size);
+
+ int (*fsm_block_download)(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
+ u8 *data, u16 size, u32 offset);
+
+ int (*fsm_component_verify)(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
+ u16 component_index);
+
+ int (*fsm_activate)(struct mlxfw_dev *mlxfw_dev, u32 fwhandle);
+
+ int (*fsm_query_state)(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
+ enum mlxfw_fsm_state *fsm_state,
+ enum mlxfw_fsm_state_err *fsm_state_err);
+
+ void (*fsm_cancel)(struct mlxfw_dev *mlxfw_dev, u32 fwhandle);
+
+ void (*fsm_release)(struct mlxfw_dev *mlxfw_dev, u32 fwhandle);
+};
+
+struct mlxfw_dev {
+ const struct mlxfw_dev_ops *ops;
+ const char *psid;
+ u16 psid_size;
+};
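+
+/* A driver embeds struct mlxfw_dev, fills in the ops and the device PSID,
+ * and then hands a firmware file to mlxfw_firmware_flash().
+ */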
+
+#if IS_REACHABLE(CONFIG_MLXFW)
+int mlxfw_firmware_flash(struct mlxfw_dev *mlxfw_dev,
+ const struct firmware *firmware);
+#else
+static inline
+int mlxfw_firmware_flash(struct mlxfw_dev *mlxfw_dev,
+ const struct firmware *firmware)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c
new file mode 100644
index 000000000000..2cf89126fb23
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c
@@ -0,0 +1,273 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define pr_fmt(fmt) "mlxfw: " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+
+#include "mlxfw.h"
+#include "mlxfw_mfa2.h"
+
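+/* Poll the FSM state for up to 30 seconds in 200 ms steps (150 rounds);
+ * a single component is capped at 10 MiB.
+ */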
+#define MLXFW_FSM_STATE_WAIT_CYCLE_MS 200
+#define MLXFW_FSM_STATE_WAIT_TIMEOUT_MS 30000
+#define MLXFW_FSM_STATE_WAIT_ROUNDS \
+ (MLXFW_FSM_STATE_WAIT_TIMEOUT_MS / MLXFW_FSM_STATE_WAIT_CYCLE_MS)
+#define MLXFW_FSM_MAX_COMPONENT_SIZE (10 * (1 << 20))
+
+static const char * const mlxfw_fsm_state_err_str[] = {
+ [MLXFW_FSM_STATE_ERR_ERROR] =
+ "general error",
+ [MLXFW_FSM_STATE_ERR_REJECTED_DIGEST_ERR] =
+ "component hash mismatch",
+ [MLXFW_FSM_STATE_ERR_REJECTED_NOT_APPLICABLE] =
+ "component not applicable",
+ [MLXFW_FSM_STATE_ERR_REJECTED_UNKNOWN_KEY] =
+ "unknown key",
+ [MLXFW_FSM_STATE_ERR_REJECTED_AUTH_FAILED] =
+ "authentication failed",
+ [MLXFW_FSM_STATE_ERR_REJECTED_UNSIGNED] =
+ "component was not signed",
+ [MLXFW_FSM_STATE_ERR_REJECTED_KEY_NOT_APPLICABLE] =
+ "key not applicable",
+ [MLXFW_FSM_STATE_ERR_REJECTED_BAD_FORMAT] =
+ "bad format",
+ [MLXFW_FSM_STATE_ERR_BLOCKED_PENDING_RESET] =
+ "pending reset",
+ [MLXFW_FSM_STATE_ERR_MAX] =
+ "unknown error"
+};
+
+static int mlxfw_fsm_state_wait(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
+ enum mlxfw_fsm_state fsm_state)
+{
+ enum mlxfw_fsm_state_err fsm_state_err;
+ enum mlxfw_fsm_state curr_fsm_state;
+ int times;
+ int err;
+
+ times = MLXFW_FSM_STATE_WAIT_ROUNDS;
+retry:
+ err = mlxfw_dev->ops->fsm_query_state(mlxfw_dev, fwhandle,
+ &curr_fsm_state, &fsm_state_err);
+ if (err)
+ return err;
+
+ if (fsm_state_err != MLXFW_FSM_STATE_ERR_OK) {
+ pr_err("Firmware flash failed: %s\n",
+ mlxfw_fsm_state_err_str[fsm_state_err]);
+ return -EINVAL;
+ }
+ if (curr_fsm_state != fsm_state) {
+ if (--times == 0) {
+ pr_err("Timeout reached on FSM state change");
+ return -ETIMEDOUT;
+ }
+ msleep(MLXFW_FSM_STATE_WAIT_CYCLE_MS);
+ goto retry;
+ }
+ return 0;
+}
+
+#define MLXFW_ALIGN_DOWN(x, align_bits) ((x) & ~((1 << (align_bits)) - 1))
+#define MLXFW_ALIGN_UP(x, align_bits) \
+ MLXFW_ALIGN_DOWN((x) + ((1 << (align_bits)) - 1), (align_bits))
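+/* e.g. with align_bits == 2 (4-byte alignment):
+ * MLXFW_ALIGN_DOWN(5, 2) == 4 and MLXFW_ALIGN_UP(5, 2) == 8.
+ */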
+
+static int mlxfw_flash_component(struct mlxfw_dev *mlxfw_dev,
+ u32 fwhandle,
+ struct mlxfw_mfa2_component *comp)
+{
+ u16 comp_max_write_size;
+ u8 comp_align_bits;
+ u32 comp_max_size;
+ u16 block_size;
+ u8 *block_ptr;
+ u32 offset;
+ int err;
+
+ err = mlxfw_dev->ops->component_query(mlxfw_dev, comp->index,
+ &comp_max_size, &comp_align_bits,
+ &comp_max_write_size);
+ if (err)
+ return err;
+
+ comp_max_size = min_t(u32, comp_max_size, MLXFW_FSM_MAX_COMPONENT_SIZE);
+ if (comp->data_size > comp_max_size) {
+ pr_err("Component %d is of size %d which is bigger than limit %d\n",
+ comp->index, comp->data_size, comp_max_size);
+ return -EINVAL;
+ }
+
+ comp_max_write_size = MLXFW_ALIGN_DOWN(comp_max_write_size,
+ comp_align_bits);
+
+ pr_debug("Component update\n");
+ err = mlxfw_dev->ops->fsm_component_update(mlxfw_dev, fwhandle,
+ comp->index,
+ comp->data_size);
+ if (err)
+ return err;
+
+ err = mlxfw_fsm_state_wait(mlxfw_dev, fwhandle,
+ MLXFW_FSM_STATE_DOWNLOAD);
+ if (err)
+ goto err_out;
+
+ pr_debug("Component download\n");
+ for (offset = 0;
+ offset < MLXFW_ALIGN_UP(comp->data_size, comp_align_bits);
+ offset += comp_max_write_size) {
+ block_ptr = comp->data + offset;
+ block_size = (u16) min_t(u32, comp->data_size - offset,
+ comp_max_write_size);
+ err = mlxfw_dev->ops->fsm_block_download(mlxfw_dev, fwhandle,
+ block_ptr, block_size,
+ offset);
+ if (err)
+ goto err_out;
+ }
+
+ pr_debug("Component verify\n");
+ err = mlxfw_dev->ops->fsm_component_verify(mlxfw_dev, fwhandle,
+ comp->index);
+ if (err)
+ goto err_out;
+
+ err = mlxfw_fsm_state_wait(mlxfw_dev, fwhandle, MLXFW_FSM_STATE_LOCKED);
+ if (err)
+ goto err_out;
+ return 0;
+
+err_out:
+ mlxfw_dev->ops->fsm_cancel(mlxfw_dev, fwhandle);
+ return err;
+}
+
+static int mlxfw_flash_components(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
+ struct mlxfw_mfa2_file *mfa2_file)
+{
+ u32 component_count;
+ int err;
+ int i;
+
+ err = mlxfw_mfa2_file_component_count(mfa2_file, mlxfw_dev->psid,
+ mlxfw_dev->psid_size,
+ &component_count);
+ if (err) {
+ pr_err("Could not find device PSID in MFA2 file\n");
+ return err;
+ }
+
+ for (i = 0; i < component_count; i++) {
+ struct mlxfw_mfa2_component *comp;
+
+ comp = mlxfw_mfa2_file_component_get(mfa2_file, mlxfw_dev->psid,
+ mlxfw_dev->psid_size, i);
+ if (IS_ERR(comp))
+ return PTR_ERR(comp);
+
+ pr_info("Flashing component type %d\n", comp->index);
+ err = mlxfw_flash_component(mlxfw_dev, fwhandle, comp);
+ mlxfw_mfa2_file_component_put(comp);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
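+/* The flash flow: lock the FSM, then for each component matching the device
+ * PSID run update -> download -> verify, then activate the new image and
+ * release the FSM handle.
+ */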
+int mlxfw_firmware_flash(struct mlxfw_dev *mlxfw_dev,
+ const struct firmware *firmware)
+{
+ struct mlxfw_mfa2_file *mfa2_file;
+ u32 fwhandle;
+ int err;
+
+ if (!mlxfw_mfa2_check(firmware)) {
+ pr_err("Firmware file is not MFA2\n");
+ return -EINVAL;
+ }
+
+ mfa2_file = mlxfw_mfa2_file_init(firmware);
+ if (IS_ERR(mfa2_file))
+ return PTR_ERR(mfa2_file);
+
+ pr_info("Initialize firmware flash process\n");
+ err = mlxfw_dev->ops->fsm_lock(mlxfw_dev, &fwhandle);
+ if (err) {
+ pr_err("Could not lock the firmware FSM\n");
+ goto err_fsm_lock;
+ }
+
+ err = mlxfw_fsm_state_wait(mlxfw_dev, fwhandle,
+ MLXFW_FSM_STATE_LOCKED);
+ if (err)
+ goto err_state_wait_idle_to_locked;
+
+ err = mlxfw_flash_components(mlxfw_dev, fwhandle, mfa2_file);
+ if (err)
+ goto err_flash_components;
+
+ pr_debug("Activate image\n");
+ err = mlxfw_dev->ops->fsm_activate(mlxfw_dev, fwhandle);
+ if (err) {
+ pr_err("Could not activate the downloaded image\n");
+ goto err_fsm_activate;
+ }
+
+ err = mlxfw_fsm_state_wait(mlxfw_dev, fwhandle, MLXFW_FSM_STATE_LOCKED);
+ if (err)
+ goto err_state_wait_activate_to_locked;
+
+ pr_debug("Handle release\n");
+ mlxfw_dev->ops->fsm_release(mlxfw_dev, fwhandle);
+
+ pr_info("Firmware flash done.\n");
+ mlxfw_mfa2_file_fini(mfa2_file);
+ return 0;
+
+err_state_wait_activate_to_locked:
+err_fsm_activate:
+err_flash_components:
+err_state_wait_idle_to_locked:
+ mlxfw_dev->ops->fsm_release(mlxfw_dev, fwhandle);
+err_fsm_lock:
+ mlxfw_mfa2_file_fini(mfa2_file);
+ return err;
+}
+EXPORT_SYMBOL(mlxfw_firmware_flash);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Yotam Gigi <yotamg@mellanox.com>");
+MODULE_DESCRIPTION("Mellanox firmware flash lib");
diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c
new file mode 100644
index 000000000000..993cb5ba934e
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c
@@ -0,0 +1,619 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define pr_fmt(fmt) "mlxfw_mfa2: " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/xz.h>
+#include "mlxfw_mfa2.h"
+#include "mlxfw_mfa2_file.h"
+#include "mlxfw_mfa2_tlv.h"
+#include "mlxfw_mfa2_format.h"
+#include "mlxfw_mfa2_tlv_multi.h"
+
+/* MFA2 FILE
+ * +----------------------------------+
+ * | MFA2 finger print |
+ * +----------------------------------+
+ * | package descriptor multi_tlv |
+ * | +------------------------------+ | +-----------------+
+ * | | package descriptor tlv +-----> |num_devices=n |
+ * | +------------------------------+ | |num_components=m |
+ * +----------------------------------+ |CB offset |
+ * | device descriptor multi_tlv | |... |
+ * | +------------------------------+ | | |
+ * | | PSID tlv | | +-----------------+
+ * | +------------------------------+ |
+ * | | component index tlv | |
+ * | +------------------------------+ |
+ * +----------------------------------+
+ * | component descriptor multi_tlv |
+ * | +------------------------------+ | +-----------------+
+ * | | component descriptor tlv +-----> |Among others: |
+ * | +------------------------------+ | |CB offset=o |
+ * +----------------------------------+ |comp index=i |
+ * | | |... |
+ * | | | |
+ * | | +-----------------+
+ * | COMPONENT BLOCK (CB) |
+ * | |
+ * | |
+ * | |
+ * +----------------------------------+
+ *
+ * On the top level, an MFA2 file contains:
+ * - Fingerprint
+ * - Several multi_tlvs (TLVs of type MLXFW_MFA2_TLV_MULTI, as defined in
+ * mlxfw_mfa2_format.h)
+ * - Compressed content block
+ *
+ * The first multi_tlv
+ * -------------------
+ * The first multi TLV is treated as the package descriptor and is expected
+ * to have a first TLV child of type MLXFW_MFA2_TLV_PACKAGE_DESCRIPTOR which
+ * contains all the global information needed to parse the file. Among
+ * others, it contains the number of device descriptors and component
+ * descriptors following this multi TLV.
+ *
+ * The device descriptor multi_tlv
+ * -------------------------------
+ * The multi TLVs following the package descriptor are treated as device
+ * descriptors and are expected to have the following children:
+ * - A PSID TLV child of type MLXFW_MFA2_TLV_PSID containing that device's
+ *   PSID.
+ * - A component index TLV of type MLXFW_MFA2_TLV_COMPONENT_PTR that contains
+ *   that device's component index.
+ *
+ * The component descriptor multi_tlv
+ * ----------------------------------
+ * The multi TLVs following the device descriptor multi TLVs are treated as
+ * component descriptors and are expected to have a first child of type
+ * MLXFW_MFA2_TLV_COMPONENT_DESCRIPTOR, which mainly contains the component
+ * index needed for the flash process and the offset of the binary within
+ * the component block.
+ */
+
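+/* Typical consumer flow, as driven by mlxfw_fsm.c (error handling elided
+ * in this sketch):
+ *
+ *	file = mlxfw_mfa2_file_init(fw);
+ *	mlxfw_mfa2_file_component_count(file, psid, psid_size, &count);
+ *	for (i = 0; i < count; i++) {
+ *		comp = mlxfw_mfa2_file_component_get(file, psid,
+ *						     psid_size, i);
+ *		... flash comp->data (comp->data_size bytes) ...
+ *		mlxfw_mfa2_file_component_put(comp);
+ *	}
+ *	mlxfw_mfa2_file_fini(file);
+ */
+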
+static const u8 mlxfw_mfa2_fingerprint[] = "MLNX.MFA2.XZ.00!";
+static const int mlxfw_mfa2_fingerprint_len =
+ sizeof(mlxfw_mfa2_fingerprint) - 1;
+
+static const u8 mlxfw_mfa2_comp_magic[] = "#BIN.COMPONENT!#";
+static const int mlxfw_mfa2_comp_magic_len = sizeof(mlxfw_mfa2_comp_magic) - 1;
+
+bool mlxfw_mfa2_check(const struct firmware *fw)
+{
+ if (fw->size < sizeof(mlxfw_mfa2_fingerprint))
+ return false;
+
+ return memcmp(fw->data, mlxfw_mfa2_fingerprint,
+ mlxfw_mfa2_fingerprint_len) == 0;
+}
+
+static bool
+mlxfw_mfa2_tlv_multi_validate(const struct mlxfw_mfa2_file *mfa2_file,
+ const struct mlxfw_mfa2_tlv_multi *multi)
+{
+ const struct mlxfw_mfa2_tlv *tlv;
+ u16 idx;
+
+ /* Check that all children are valid */
+ mlxfw_mfa2_tlv_multi_foreach(mfa2_file, tlv, idx, multi) {
+ if (!tlv) {
+ pr_err("Multi has invalid child");
+ return false;
+ }
+ }
+ return true;
+}
+
+static bool
+mlxfw_mfa2_file_dev_validate(const struct mlxfw_mfa2_file *mfa2_file,
+ const struct mlxfw_mfa2_tlv *dev_tlv,
+ u16 dev_idx)
+{
+ const struct mlxfw_mfa2_tlv_component_ptr *cptr;
+ const struct mlxfw_mfa2_tlv_multi *multi;
+ const struct mlxfw_mfa2_tlv_psid *psid;
+ const struct mlxfw_mfa2_tlv *tlv;
+ u16 cptr_count;
+ u16 cptr_idx;
+ int err;
+
+ pr_debug("Device %d\n", dev_idx);
+
+ multi = mlxfw_mfa2_tlv_multi_get(mfa2_file, dev_tlv);
+ if (!multi) {
+ pr_err("Device %d is not a valid TLV error\n", dev_idx);
+ return false;
+ }
+
+ if (!mlxfw_mfa2_tlv_multi_validate(mfa2_file, multi))
+ return false;
+
+ /* Validate the device has PSID tlv */
+ tlv = mlxfw_mfa2_tlv_multi_child_find(mfa2_file, multi,
+ MLXFW_MFA2_TLV_PSID, 0);
+ if (!tlv) {
+ pr_err("Device %d does not have PSID\n", dev_idx);
+ return false;
+ }
+
+ psid = mlxfw_mfa2_tlv_psid_get(mfa2_file, tlv);
+ if (!psid) {
+ pr_err("Device %d PSID TLV is not valid\n", dev_idx);
+ return false;
+ }
+
+ print_hex_dump_debug(" -- Device PSID ", DUMP_PREFIX_NONE, 16, 16,
+ psid->psid, be16_to_cpu(tlv->len), true);
+
+ /* Validate the device has COMPONENT_PTR */
+ err = mlxfw_mfa2_tlv_multi_child_count(mfa2_file, multi,
+ MLXFW_MFA2_TLV_COMPONENT_PTR,
+ &cptr_count);
+ if (err)
+ return false;
+
+ if (cptr_count == 0) {
+ pr_err("Device %d has no components\n", dev_idx);
+ return false;
+ }
+
+ for (cptr_idx = 0; cptr_idx < cptr_count; cptr_idx++) {
+ tlv = mlxfw_mfa2_tlv_multi_child_find(mfa2_file, multi,
+ MLXFW_MFA2_TLV_COMPONENT_PTR,
+ cptr_idx);
+ if (!tlv)
+ return false;
+
+ cptr = mlxfw_mfa2_tlv_component_ptr_get(mfa2_file, tlv);
+ if (!cptr) {
+ pr_err("Device %d COMPONENT_PTR TLV is not valid\n",
+ dev_idx);
+ return false;
+ }
+
+ pr_debug(" -- Component index %d\n",
+ be16_to_cpu(cptr->component_index));
+ }
+ return true;
+}
+
+static bool
+mlxfw_mfa2_file_comp_validate(const struct mlxfw_mfa2_file *mfa2_file,
+ const struct mlxfw_mfa2_tlv *comp_tlv,
+ u16 comp_idx)
+{
+ const struct mlxfw_mfa2_tlv_component_descriptor *cdesc;
+ const struct mlxfw_mfa2_tlv_multi *multi;
+ const struct mlxfw_mfa2_tlv *tlv;
+
+ pr_debug("Component %d\n", comp_idx);
+
+ multi = mlxfw_mfa2_tlv_multi_get(mfa2_file, comp_tlv);
+ if (!multi) {
+ pr_err("Component %d is not a valid TLV error\n", comp_idx);
+ return false;
+ }
+
+ if (!mlxfw_mfa2_tlv_multi_validate(mfa2_file, multi))
+ return false;
+
+ /* Check that the component has a COMPONENT_DESCRIPTOR as its first child */
+ tlv = mlxfw_mfa2_tlv_multi_child(mfa2_file, multi);
+ if (!tlv) {
+ pr_err("Component descriptor %d multi TLV error\n", comp_idx);
+ return false;
+ }
+
+ cdesc = mlxfw_mfa2_tlv_component_descriptor_get(mfa2_file, tlv);
+ if (!cdesc) {
+ pr_err("Component %d does not have a valid descriptor\n",
+ comp_idx);
+ return false;
+ }
+ pr_debug(" -- Component type %d\n", be16_to_cpu(cdesc->identifier));
+ pr_debug(" -- Offset 0x%llx and size %d\n",
+ ((u64) be32_to_cpu(cdesc->cb_offset_h) << 32)
+ | be32_to_cpu(cdesc->cb_offset_l), be32_to_cpu(cdesc->size));
+
+ return true;
+}
+
+static bool mlxfw_mfa2_file_validate(const struct mlxfw_mfa2_file *mfa2_file)
+{
+ const struct mlxfw_mfa2_tlv *tlv;
+ u16 idx;
+
+ pr_debug("Validating file\n");
+
+ /* check that all the devices exist */
+ mlxfw_mfa2_tlv_foreach(mfa2_file, tlv, idx, mfa2_file->first_dev,
+ mfa2_file->dev_count) {
+ if (!tlv) {
+ pr_err("Device TLV error\n");
+ return false;
+ }
+
+ /* Check each device */
+ if (!mlxfw_mfa2_file_dev_validate(mfa2_file, tlv, idx))
+ return false;
+ }
+
+ /* check that all the components exist */
+ mlxfw_mfa2_tlv_foreach(mfa2_file, tlv, idx, mfa2_file->first_component,
+ mfa2_file->component_count) {
+ if (!tlv) {
+ pr_err("Device TLV error\n");
+ return false;
+ }
+
+ /* Check each component */
+ if (!mlxfw_mfa2_file_comp_validate(mfa2_file, tlv, idx))
+ return false;
+ }
+ return true;
+}
+
+struct mlxfw_mfa2_file *mlxfw_mfa2_file_init(const struct firmware *fw)
+{
+ const struct mlxfw_mfa2_tlv_package_descriptor *pd;
+ const struct mlxfw_mfa2_tlv_multi *multi;
+ const struct mlxfw_mfa2_tlv *multi_child;
+ const struct mlxfw_mfa2_tlv *first_tlv;
+ struct mlxfw_mfa2_file *mfa2_file;
+ const void *first_tlv_ptr;
+ const void *cb_top_ptr;
+
+ mfa2_file = kzalloc(sizeof(*mfa2_file), GFP_KERNEL);
+ if (!mfa2_file)
+ return ERR_PTR(-ENOMEM);
+
+ mfa2_file->fw = fw;
+ first_tlv_ptr = fw->data + NLA_ALIGN(mlxfw_mfa2_fingerprint_len);
+ first_tlv = mlxfw_mfa2_tlv_get(mfa2_file, first_tlv_ptr);
+ if (!first_tlv) {
+ pr_err("Could not parse package descriptor TLV\n");
+ goto err_out;
+ }
+
+ multi = mlxfw_mfa2_tlv_multi_get(mfa2_file, first_tlv);
+ if (!multi) {
+ pr_err("First TLV is not of valid multi type\n");
+ goto err_out;
+ }
+
+ multi_child = mlxfw_mfa2_tlv_multi_child(mfa2_file, multi);
+ if (!multi_child)
+ goto err_out;
+
+ pd = mlxfw_mfa2_tlv_package_descriptor_get(mfa2_file, multi_child);
+ if (!pd) {
+ pr_err("Could not parse package descriptor TLV\n");
+ goto err_out;
+ }
+
+ mfa2_file->first_dev = mlxfw_mfa2_tlv_next(mfa2_file, first_tlv);
+ if (!mfa2_file->first_dev) {
+ pr_err("First device TLV is not valid\n");
+ goto err_out;
+ }
+
+ mfa2_file->dev_count = be16_to_cpu(pd->num_devices);
+ mfa2_file->first_component = mlxfw_mfa2_tlv_advance(mfa2_file,
+ mfa2_file->first_dev,
+ mfa2_file->dev_count);
+ mfa2_file->component_count = be16_to_cpu(pd->num_components);
+ mfa2_file->cb = fw->data + NLA_ALIGN(be32_to_cpu(pd->cb_offset));
+ if (!mlxfw_mfa2_valid_ptr(mfa2_file, mfa2_file->cb)) {
+ pr_err("Component block is out side the file\n");
+ goto err_out;
+ }
+ mfa2_file->cb_archive_size = be32_to_cpu(pd->cb_archive_size);
+ cb_top_ptr = mfa2_file->cb + mfa2_file->cb_archive_size - 1;
+ if (!mlxfw_mfa2_valid_ptr(mfa2_file, cb_top_ptr)) {
+ pr_err("Component block size is too big\n");
+ goto err_out;
+ }
+
+ if (!mlxfw_mfa2_file_validate(mfa2_file))
+ goto err_out;
+ return mfa2_file;
+err_out:
+ kfree(mfa2_file);
+ return ERR_PTR(-EINVAL);
+}
+
+static const struct mlxfw_mfa2_tlv_multi *
+mlxfw_mfa2_tlv_dev_get(const struct mlxfw_mfa2_file *mfa2_file,
+ const char *psid, u16 psid_size)
+{
+ const struct mlxfw_mfa2_tlv_psid *tlv_psid;
+ const struct mlxfw_mfa2_tlv_multi *dev_multi;
+ const struct mlxfw_mfa2_tlv *dev_tlv;
+ const struct mlxfw_mfa2_tlv *tlv;
+ u32 idx;
+
+ /* for each device tlv */
+ mlxfw_mfa2_tlv_foreach(mfa2_file, dev_tlv, idx, mfa2_file->first_dev,
+ mfa2_file->dev_count) {
+ if (!dev_tlv)
+ return NULL;
+
+ dev_multi = mlxfw_mfa2_tlv_multi_get(mfa2_file, dev_tlv);
+ if (!dev_multi)
+ return NULL;
+
+ /* find psid child and compare */
+ tlv = mlxfw_mfa2_tlv_multi_child_find(mfa2_file, dev_multi,
+ MLXFW_MFA2_TLV_PSID, 0);
+ if (!tlv)
+ return NULL;
+ if (be16_to_cpu(tlv->len) != psid_size)
+ continue;
+
+ tlv_psid = mlxfw_mfa2_tlv_psid_get(mfa2_file, tlv);
+ if (!tlv_psid)
+ return NULL;
+
+ if (memcmp(psid, tlv_psid->psid, psid_size) == 0)
+ return dev_multi;
+ }
+
+ return NULL;
+}
+
+int mlxfw_mfa2_file_component_count(const struct mlxfw_mfa2_file *mfa2_file,
+ const char *psid, u32 psid_size,
+ u32 *p_count)
+{
+ const struct mlxfw_mfa2_tlv_multi *dev_multi;
+ u16 count;
+ int err;
+
+ dev_multi = mlxfw_mfa2_tlv_dev_get(mfa2_file, psid, psid_size);
+ if (!dev_multi)
+ return -EINVAL;
+
+ err = mlxfw_mfa2_tlv_multi_child_count(mfa2_file, dev_multi,
+ MLXFW_MFA2_TLV_COMPONENT_PTR,
+ &count);
+ if (err)
+ return err;
+
+ *p_count = count;
+ return 0;
+}
+
+static int mlxfw_mfa2_xz_dec_run(struct xz_dec *xz_dec, struct xz_buf *xz_buf,
+ bool *finished)
+{
+ enum xz_ret xz_ret;
+
+ xz_ret = xz_dec_run(xz_dec, xz_buf);
+
+ switch (xz_ret) {
+ case XZ_STREAM_END:
+ *finished = true;
+ return 0;
+ case XZ_OK:
+ *finished = false;
+ return 0;
+ case XZ_MEM_ERROR:
+ pr_err("xz no memory\n");
+ return -ENOMEM;
+ case XZ_DATA_ERROR:
+ pr_err("xz file corrupted\n");
+ return -EINVAL;
+ case XZ_FORMAT_ERROR:
+ pr_err("xz format not found\n");
+ return -EINVAL;
+ case XZ_OPTIONS_ERROR:
+ pr_err("unsupported xz option\n");
+ return -EINVAL;
+ case XZ_MEMLIMIT_ERROR:
+ pr_err("xz dictionary too small\n");
+ return -EINVAL;
+ default:
+ pr_err("xz error %d\n", xz_ret);
+ return -EINVAL;
+ }
+}
+
+static int mlxfw_mfa2_file_cb_offset_xz(const struct mlxfw_mfa2_file *mfa2_file,
+ off_t off, size_t size, u8 *buf)
+{
+ struct xz_dec *xz_dec;
+ struct xz_buf dec_buf;
+ off_t curr_off = 0;
+ bool finished;
+ int err;
+
+ xz_dec = xz_dec_init(XZ_DYNALLOC, (u32) -1);
+ if (!xz_dec)
+ return -EINVAL;
+
+ dec_buf.in_size = mfa2_file->cb_archive_size;
+ dec_buf.in = mfa2_file->cb;
+ dec_buf.in_pos = 0;
+ dec_buf.out = buf;
+
+ /* Decode up to the offset. The XZ stream has no random access, so
+  * everything before @off is decoded into @buf as scratch and discarded
+  * by rewinding out_pos on each iteration.
+  */
+ do {
+ dec_buf.out_pos = 0;
+ dec_buf.out_size = min_t(size_t, size, off - curr_off);
+ if (dec_buf.out_size == 0)
+ break;
+
+ err = mlxfw_mfa2_xz_dec_run(xz_dec, &dec_buf, &finished);
+ if (err)
+ goto out;
+ if (finished) {
+ pr_err("xz section too short\n");
+ err = -EINVAL;
+ goto out;
+ }
+ curr_off += dec_buf.out_pos;
+ } while (curr_off != off);
+
+ /* decode the needed section */
+ dec_buf.out_pos = 0;
+ dec_buf.out_size = size;
+ err = mlxfw_mfa2_xz_dec_run(xz_dec, &dec_buf, &finished);
+out:
+ xz_dec_end(xz_dec);
+ return err;
+}
+
+static const struct mlxfw_mfa2_tlv_component_descriptor *
+mlxfw_mfa2_file_component_tlv_get(const struct mlxfw_mfa2_file *mfa2_file,
+ u16 comp_index)
+{
+ const struct mlxfw_mfa2_tlv_multi *multi;
+ const struct mlxfw_mfa2_tlv *multi_child;
+ const struct mlxfw_mfa2_tlv *comp_tlv;
+
+ if (comp_index >= mfa2_file->component_count)
+ return NULL;
+
+ comp_tlv = mlxfw_mfa2_tlv_advance(mfa2_file, mfa2_file->first_component,
+ comp_index);
+ if (!comp_tlv)
+ return NULL;
+
+ multi = mlxfw_mfa2_tlv_multi_get(mfa2_file, comp_tlv);
+ if (!multi)
+ return NULL;
+
+ multi_child = mlxfw_mfa2_tlv_multi_child(mfa2_file, multi);
+ if (!multi_child)
+ return NULL;
+
+ return mlxfw_mfa2_tlv_component_descriptor_get(mfa2_file, multi_child);
+}
+
+struct mlxfw_mfa2_comp_data {
+ struct mlxfw_mfa2_component comp;
+ u8 buff[0];
+};
+
+static const struct mlxfw_mfa2_tlv_component_descriptor *
+mlxfw_mfa2_file_component_find(const struct mlxfw_mfa2_file *mfa2_file,
+ const char *psid, int psid_size,
+ int component_index)
+{
+ const struct mlxfw_mfa2_tlv_component_ptr *cptr;
+ const struct mlxfw_mfa2_tlv_multi *dev_multi;
+ const struct mlxfw_mfa2_tlv *cptr_tlv;
+ u16 comp_idx;
+
+ dev_multi = mlxfw_mfa2_tlv_dev_get(mfa2_file, psid, psid_size);
+ if (!dev_multi)
+ return NULL;
+
+ cptr_tlv = mlxfw_mfa2_tlv_multi_child_find(mfa2_file, dev_multi,
+ MLXFW_MFA2_TLV_COMPONENT_PTR,
+ component_index);
+ if (!cptr_tlv)
+ return NULL;
+
+ cptr = mlxfw_mfa2_tlv_component_ptr_get(mfa2_file, cptr_tlv);
+ if (!cptr)
+ return NULL;
+
+ comp_idx = be16_to_cpu(cptr->component_index);
+ return mlxfw_mfa2_file_component_tlv_get(mfa2_file, comp_idx);
+}
+
+struct mlxfw_mfa2_component *
+mlxfw_mfa2_file_component_get(const struct mlxfw_mfa2_file *mfa2_file,
+ const char *psid, int psid_size,
+ int component_index)
+{
+ const struct mlxfw_mfa2_tlv_component_descriptor *comp;
+ struct mlxfw_mfa2_comp_data *comp_data;
+ u32 comp_buf_size;
+ off_t cb_offset;
+ u32 comp_size;
+ int err;
+
+ comp = mlxfw_mfa2_file_component_find(mfa2_file, psid, psid_size,
+ component_index);
+ if (!comp)
+ return ERR_PTR(-EINVAL);
+
+ cb_offset = (u64) be32_to_cpu(comp->cb_offset_h) << 32 |
+ be32_to_cpu(comp->cb_offset_l);
+ comp_size = be32_to_cpu(comp->size);
+ comp_buf_size = comp_size + mlxfw_mfa2_comp_magic_len;
+
+ comp_data = kmalloc(sizeof(*comp_data) + comp_buf_size, GFP_KERNEL);
+ if (!comp_data)
+ return ERR_PTR(-ENOMEM);
+ comp_data->comp.data_size = comp_size;
+ comp_data->comp.index = be16_to_cpu(comp->identifier);
+ err = mlxfw_mfa2_file_cb_offset_xz(mfa2_file, cb_offset, comp_buf_size,
+ comp_data->buff);
+ if (err) {
+ pr_err("Component could not be reached in CB\n");
+ goto err_out;
+ }
+
+ if (memcmp(comp_data->buff, mlxfw_mfa2_comp_magic,
+ mlxfw_mfa2_comp_magic_len) != 0) {
+ pr_err("Component has wrong magic\n");
+ err = -EINVAL;
+ goto err_out;
+ }
+
+ comp_data->comp.data = comp_data->buff + mlxfw_mfa2_comp_magic_len;
+ return &comp_data->comp;
+err_out:
+ kfree(comp_data);
+ return ERR_PTR(err);
+}
+
+void mlxfw_mfa2_file_component_put(struct mlxfw_mfa2_component *comp)
+{
+ const struct mlxfw_mfa2_comp_data *comp_data;
+
+ comp_data = container_of(comp, struct mlxfw_mfa2_comp_data, comp);
+ kfree(comp_data);
+}
+
+void mlxfw_mfa2_file_fini(struct mlxfw_mfa2_file *mfa2_file)
+{
+ kfree(mfa2_file);
+}
diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.h b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.h
new file mode 100644
index 000000000000..20472aa139cd
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.h
@@ -0,0 +1,66 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.h
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXFW_MFA2_H
+#define _MLXFW_MFA2_H
+
+#include <linux/firmware.h>
+#include "mlxfw.h"
+
+struct mlxfw_mfa2_component {
+ u16 index;
+ u32 data_size;
+ u8 *data;
+};
+
+struct mlxfw_mfa2_file;
+
+bool mlxfw_mfa2_check(const struct firmware *fw);
+
+struct mlxfw_mfa2_file *mlxfw_mfa2_file_init(const struct firmware *fw);
+
+int mlxfw_mfa2_file_component_count(const struct mlxfw_mfa2_file *mfa2_file,
+ const char *psid, u32 psid_size,
+ u32 *p_count);
+
+struct mlxfw_mfa2_component *
+mlxfw_mfa2_file_component_get(const struct mlxfw_mfa2_file *mfa2_file,
+ const char *psid, int psid_size,
+ int component_index);
+
+void mlxfw_mfa2_file_component_put(struct mlxfw_mfa2_component *component);
+
+void mlxfw_mfa2_file_fini(struct mlxfw_mfa2_file *mfa2_file);
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_file.h b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_file.h
new file mode 100644
index 000000000000..f667942b1ea3
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_file.h
@@ -0,0 +1,60 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_file.h
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXFW_MFA2_FILE_H
+#define _MLXFW_MFA2_FILE_H
+
+#include <linux/firmware.h>
+#include <linux/kernel.h>
+
+struct mlxfw_mfa2_file {
+ const struct firmware *fw;
+ const struct mlxfw_mfa2_tlv *first_dev;
+ u16 dev_count;
+ const struct mlxfw_mfa2_tlv *first_component;
+ u16 component_count;
+ const void *cb; /* components block */
+ u32 cb_archive_size; /* size of compressed components block */
+};
+
+static inline bool mlxfw_mfa2_valid_ptr(const struct mlxfw_mfa2_file *mfa2_file,
+ const void *ptr)
+{
+ const void *valid_to = mfa2_file->fw->data + mfa2_file->fw->size;
+ const void *valid_from = mfa2_file->fw->data;
+
+ return ptr > valid_from && ptr < valid_to;
+}
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_format.h b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_format.h
new file mode 100644
index 000000000000..dd66737c033d
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_format.h
@@ -0,0 +1,103 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_format.h
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _MLXFW_MFA2_FORMAT_H
+#define _MLXFW_MFA2_FORMAT_H
+
+#include "mlxfw_mfa2_file.h"
+#include "mlxfw_mfa2_tlv.h"
+
+enum mlxfw_mfa2_tlv_type {
+ MLXFW_MFA2_TLV_MULTI_PART = 0x01,
+ MLXFW_MFA2_TLV_PACKAGE_DESCRIPTOR = 0x02,
+ MLXFW_MFA2_TLV_COMPONENT_DESCRIPTOR = 0x04,
+ MLXFW_MFA2_TLV_COMPONENT_PTR = 0x22,
+ MLXFW_MFA2_TLV_PSID = 0x2A,
+};
+
+enum mlxfw_mfa2_compression_type {
+ MLXFW_MFA2_COMPRESSION_TYPE_NONE,
+ MLXFW_MFA2_COMPRESSION_TYPE_XZ,
+};
+
+struct mlxfw_mfa2_tlv_package_descriptor {
+ __be16 num_components;
+ __be16 num_devices;
+ __be32 cb_offset;
+ __be32 cb_archive_size;
+ __be32 cb_size_h;
+ __be32 cb_size_l;
+ u8 padding[3];
+ u8 cv_compression;
+ __be32 user_data_offset;
+} __packed;
+
+MLXFW_MFA2_TLV(package_descriptor, struct mlxfw_mfa2_tlv_package_descriptor,
+ MLXFW_MFA2_TLV_PACKAGE_DESCRIPTOR);
+
+struct mlxfw_mfa2_tlv_multi {
+ __be16 num_extensions;
+ __be16 total_len;
+} __packed;
+
+MLXFW_MFA2_TLV(multi, struct mlxfw_mfa2_tlv_multi,
+ MLXFW_MFA2_TLV_MULTI_PART);
+
+struct mlxfw_mfa2_tlv_psid {
+ u8 psid[0];
+} __packed;
+
+MLXFW_MFA2_TLV_VARSIZE(psid, struct mlxfw_mfa2_tlv_psid,
+ MLXFW_MFA2_TLV_PSID);
+
+struct mlxfw_mfa2_tlv_component_ptr {
+ __be16 storage_id;
+ __be16 component_index;
+ __be32 storage_address;
+} __packed;
+
+MLXFW_MFA2_TLV(component_ptr, struct mlxfw_mfa2_tlv_component_ptr,
+ MLXFW_MFA2_TLV_COMPONENT_PTR);
+
+struct mlxfw_mfa2_tlv_component_descriptor {
+ __be16 pldm_classification;
+ __be16 identifier;
+ __be32 cb_offset_h;
+ __be32 cb_offset_l;
+ __be32 size;
+} __packed;
+
+MLXFW_MFA2_TLV(component_descriptor, struct mlxfw_mfa2_tlv_component_descriptor,
+ MLXFW_MFA2_TLV_COMPONENT_DESCRIPTOR);
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv.h b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv.h
new file mode 100644
index 000000000000..cc013e77b326
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv.h
@@ -0,0 +1,98 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv.h
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXFW_MFA2_TLV_H
+#define _MLXFW_MFA2_TLV_H
+
+#include <linux/kernel.h>
+#include "mlxfw_mfa2_file.h"
+
+struct mlxfw_mfa2_tlv {
+ u8 version;
+ u8 type;
+ __be16 len;
+ u8 data[0];
+} __packed;
+
+static inline const struct mlxfw_mfa2_tlv *
+mlxfw_mfa2_tlv_get(const struct mlxfw_mfa2_file *mfa2_file, const void *ptr)
+{
+ if (!mlxfw_mfa2_valid_ptr(mfa2_file, ptr) ||
+ !mlxfw_mfa2_valid_ptr(mfa2_file, ptr + sizeof(struct mlxfw_mfa2_tlv)))
+ return NULL;
+ return ptr;
+}
+
+static inline const void *
+mlxfw_mfa2_tlv_payload_get(const struct mlxfw_mfa2_file *mfa2_file,
+ const struct mlxfw_mfa2_tlv *tlv, u8 payload_type,
+ size_t payload_size, bool varsize)
+{
+ void *tlv_top;
+
+ tlv_top = (void *) tlv + be16_to_cpu(tlv->len) - 1;
+ if (!mlxfw_mfa2_valid_ptr(mfa2_file, tlv) ||
+ !mlxfw_mfa2_valid_ptr(mfa2_file, tlv_top))
+ return NULL;
+ if (tlv->type != payload_type)
+ return NULL;
+ if (varsize && (be16_to_cpu(tlv->len) < payload_size))
+ return NULL;
+ if (!varsize && (be16_to_cpu(tlv->len) != payload_size))
+ return NULL;
+
+ return tlv->data;
+}
+
+#define MLXFW_MFA2_TLV(name, payload_type, tlv_type) \
+static inline const payload_type * \
+mlxfw_mfa2_tlv_ ## name ## _get(const struct mlxfw_mfa2_file *mfa2_file, \
+ const struct mlxfw_mfa2_tlv *tlv) \
+{ \
+ return mlxfw_mfa2_tlv_payload_get(mfa2_file, tlv, \
+ tlv_type, sizeof(payload_type), \
+ false); \
+}
+
+#define MLXFW_MFA2_TLV_VARSIZE(name, payload_type, tlv_type) \
+static inline const payload_type * \
+mlxfw_mfa2_tlv_ ## name ## _get(const struct mlxfw_mfa2_file *mfa2_file, \
+ const struct mlxfw_mfa2_tlv *tlv) \
+{ \
+ return mlxfw_mfa2_tlv_payload_get(mfa2_file, tlv, \
+ tlv_type, sizeof(payload_type), \
+ true); \
+}
+
+#endif
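
Note: for readability, this is what the MLXFW_MFA2_TLV(multi, ...) invocation in mlxfw_mfa2_format.h expands to; it is just the preprocessor output of the macro above, not additional code:

static inline const struct mlxfw_mfa2_tlv_multi *
mlxfw_mfa2_tlv_multi_get(const struct mlxfw_mfa2_file *mfa2_file,
			 const struct mlxfw_mfa2_tlv *tlv)
{
	return mlxfw_mfa2_tlv_payload_get(mfa2_file, tlv,
					  MLXFW_MFA2_TLV_MULTI_PART,
					  sizeof(struct mlxfw_mfa2_tlv_multi),
					  false);
}

The fixed-size variant rejects any TLV whose len differs from the payload size, while MLXFW_MFA2_TLV_VARSIZE (used for the PSID) only requires len to be at least that large.
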
diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv_multi.c b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv_multi.c
new file mode 100644
index 000000000000..0094b92a233b
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv_multi.c
@@ -0,0 +1,126 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv_multi.c
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define pr_fmt(fmt) "MFA2: " fmt
+
+#include "mlxfw_mfa2_tlv_multi.h"
+#include <uapi/linux/netlink.h>
+
+#define MLXFW_MFA2_TLV_TOTAL_SIZE(tlv) \
+ NLA_ALIGN(sizeof(*(tlv)) + be16_to_cpu((tlv)->len))
+
+const struct mlxfw_mfa2_tlv *
+mlxfw_mfa2_tlv_multi_child(const struct mlxfw_mfa2_file *mfa2_file,
+ const struct mlxfw_mfa2_tlv_multi *multi)
+{
+ size_t multi_len;
+
+ multi_len = NLA_ALIGN(sizeof(struct mlxfw_mfa2_tlv_multi));
+ return mlxfw_mfa2_tlv_get(mfa2_file, (void *) multi + multi_len);
+}
+
+const struct mlxfw_mfa2_tlv *
+mlxfw_mfa2_tlv_next(const struct mlxfw_mfa2_file *mfa2_file,
+ const struct mlxfw_mfa2_tlv *tlv)
+{
+ const struct mlxfw_mfa2_tlv_multi *multi;
+ u16 tlv_len;
+ void *next;
+
+ tlv_len = MLXFW_MFA2_TLV_TOTAL_SIZE(tlv);
+
+ if (tlv->type == MLXFW_MFA2_TLV_MULTI_PART) {
+ multi = mlxfw_mfa2_tlv_multi_get(mfa2_file, tlv);
+ tlv_len = NLA_ALIGN(tlv_len + be16_to_cpu(multi->total_len));
+ }
+
+ next = (void *) tlv + tlv_len;
+ return mlxfw_mfa2_tlv_get(mfa2_file, next);
+}
+
+const struct mlxfw_mfa2_tlv *
+mlxfw_mfa2_tlv_advance(const struct mlxfw_mfa2_file *mfa2_file,
+ const struct mlxfw_mfa2_tlv *from_tlv, u16 count)
+{
+ const struct mlxfw_mfa2_tlv *tlv;
+ u16 idx;
+
+ mlxfw_mfa2_tlv_foreach(mfa2_file, tlv, idx, from_tlv, count)
+ if (!tlv)
+ return NULL;
+ return tlv;
+}
+
+const struct mlxfw_mfa2_tlv *
+mlxfw_mfa2_tlv_multi_child_find(const struct mlxfw_mfa2_file *mfa2_file,
+ const struct mlxfw_mfa2_tlv_multi *multi,
+ enum mlxfw_mfa2_tlv_type type, u16 index)
+{
+ const struct mlxfw_mfa2_tlv *tlv;
+ u16 skip = 0;
+ u16 idx;
+
+ mlxfw_mfa2_tlv_multi_foreach(mfa2_file, tlv, idx, multi) {
+ if (!tlv) {
+ pr_err("TLV parsing error\n");
+ return NULL;
+ }
+ if (tlv->type == type)
+ if (skip++ == index)
+ return tlv;
+ }
+ return NULL;
+}
+
+int mlxfw_mfa2_tlv_multi_child_count(const struct mlxfw_mfa2_file *mfa2_file,
+ const struct mlxfw_mfa2_tlv_multi *multi,
+ enum mlxfw_mfa2_tlv_type type,
+ u16 *p_count)
+{
+ const struct mlxfw_mfa2_tlv *tlv;
+ u16 count = 0;
+ u16 idx;
+
+ mlxfw_mfa2_tlv_multi_foreach(mfa2_file, tlv, idx, multi) {
+ if (!tlv) {
+ pr_err("TLV parsing error\n");
+ return -EINVAL;
+ }
+
+ if (tlv->type == type)
+ count++;
+ }
+ *p_count = count;
+ return 0;
+}
diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv_multi.h b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv_multi.h
new file mode 100644
index 000000000000..2c667894f3a2
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv_multi.h
@@ -0,0 +1,71 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv_multi.h
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _MLXFW_MFA2_TLV_MULTI_H
+#define _MLXFW_MFA2_TLV_MULTI_H
+
+#include "mlxfw_mfa2_tlv.h"
+#include "mlxfw_mfa2_format.h"
+#include "mlxfw_mfa2_file.h"
+
+const struct mlxfw_mfa2_tlv *
+mlxfw_mfa2_tlv_multi_child(const struct mlxfw_mfa2_file *mfa2_file,
+ const struct mlxfw_mfa2_tlv_multi *multi);
+
+const struct mlxfw_mfa2_tlv *
+mlxfw_mfa2_tlv_next(const struct mlxfw_mfa2_file *mfa2_file,
+ const struct mlxfw_mfa2_tlv *tlv);
+
+const struct mlxfw_mfa2_tlv *
+mlxfw_mfa2_tlv_advance(const struct mlxfw_mfa2_file *mfa2_file,
+ const struct mlxfw_mfa2_tlv *from_tlv, u16 count);
+
+const struct mlxfw_mfa2_tlv *
+mlxfw_mfa2_tlv_multi_child_find(const struct mlxfw_mfa2_file *mfa2_file,
+ const struct mlxfw_mfa2_tlv_multi *multi,
+ enum mlxfw_mfa2_tlv_type type, u16 index);
+
+int mlxfw_mfa2_tlv_multi_child_count(const struct mlxfw_mfa2_file *mfa2_file,
+ const struct mlxfw_mfa2_tlv_multi *multi,
+ enum mlxfw_mfa2_tlv_type type,
+ u16 *p_count);
+
+#define mlxfw_mfa2_tlv_foreach(mfa2_file, tlv, idx, from_tlv, count) \
+ for (idx = 0, tlv = from_tlv; idx < (count); \
+ idx++, tlv = mlxfw_mfa2_tlv_next(mfa2_file, tlv))
+
+#define mlxfw_mfa2_tlv_multi_foreach(mfa2_file, tlv, idx, multi) \
+ mlxfw_mfa2_tlv_foreach(mfa2_file, tlv, idx, \
+ mlxfw_mfa2_tlv_multi_child(mfa2_file, multi), \
+ be16_to_cpu(multi->num_extensions) + 1)
+#endif
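
Note: a sketch of how a caller is expected to combine the helpers above; the function itself is hypothetical, but every helper and constant it uses is introduced by this patch:

/* Hypothetical caller: count a multi TLV's PSID children and fetch the
 * first one, walking the children via the helpers above.
 */
static int mlxfw_mfa2_psid_sketch(const struct mlxfw_mfa2_file *mfa2_file,
				  const struct mlxfw_mfa2_tlv_multi *multi)
{
	const struct mlxfw_mfa2_tlv *tlv;
	u16 count;
	int err;

	err = mlxfw_mfa2_tlv_multi_child_count(mfa2_file, multi,
					       MLXFW_MFA2_TLV_PSID, &count);
	if (err)
		return err;
	if (!count)
		return -ENOENT;

	tlv = mlxfw_mfa2_tlv_multi_child_find(mfa2_file, multi,
					      MLXFW_MFA2_TLV_PSID, 0);
	return tlv ? 0 : -EINVAL;
}
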
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
index ef23eaedc2ff..695adff89d71 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
@@ -74,7 +74,9 @@ config MLXSW_SPECTRUM
tristate "Mellanox Technologies Spectrum support"
depends on MLXSW_CORE && MLXSW_PCI && NET_SWITCHDEV && VLAN_8021Q
depends on PSAMPLE || PSAMPLE=n
+ depends on BRIDGE || BRIDGE=n
select PARMAN
+ select MLXFW
default m
---help---
This driver supports Mellanox Technologies Spectrum Ethernet
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile
index 2fb8c6585ac7..62fc42f396bb 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Makefile
+++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile
@@ -16,7 +16,8 @@ mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \
spectrum_switchdev.o spectrum_router.o \
spectrum_kvdl.o spectrum_acl_tcam.o \
spectrum_acl.o spectrum_flower.o \
- spectrum_cnt.o spectrum_dpipe.o
+ spectrum_cnt.o spectrum_dpipe.o \
+ spectrum_fid.o
mlxsw_spectrum-$(CONFIG_MLXSW_SPECTRUM_DCB) += spectrum_dcb.o
obj-$(CONFIG_MLXSW_MINIMAL) += mlxsw_minimal.o
mlxsw_minimal-objs := minimal.o
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index 7fb35395adf5..6e966af72fc4 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -344,15 +344,17 @@ struct mlxsw_bus {
u8 features;
};
+struct mlxsw_fw_rev {
+ u16 major;
+ u16 minor;
+ u16 subminor;
+};
+
struct mlxsw_bus_info {
const char *device_kind;
const char *device_name;
struct device *dev;
- struct {
- u16 major;
- u16 minor;
- u16 subminor;
- } fw_rev;
+ struct mlxsw_fw_rev fw_rev;
u8 vsd[MLXSW_CMD_BOARDINFO_VSD_LEN];
u8 psid[MLXSW_CMD_BOARDINFO_PSID_LEN];
};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
index 46304ffb9449..5ae110172c22 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
@@ -40,6 +40,7 @@
#include <linux/list.h>
#include "item.h"
+#include "trap.h"
#include "core_acl_flex_actions.h"
enum mlxsw_afa_set_type {
@@ -662,6 +663,16 @@ EXPORT_SYMBOL(mlxsw_afa_block_append_vlan_modify);
#define MLXSW_AFA_TRAPDISC_CODE 0x03
#define MLXSW_AFA_TRAPDISC_SIZE 1
+enum mlxsw_afa_trapdisc_trap_action {
+ MLXSW_AFA_TRAPDISC_TRAP_ACTION_NOP = 0,
+ MLXSW_AFA_TRAPDISC_TRAP_ACTION_TRAP = 2,
+};
+
+/* afa_trapdisc_trap_action
+ * Trap Action.
+ */
+MLXSW_ITEM32(afa, trapdisc, trap_action, 0x00, 24, 4);
+
enum mlxsw_afa_trapdisc_forward_action {
MLXSW_AFA_TRAPDISC_FORWARD_ACTION_DISCARD = 3,
};
@@ -671,11 +682,20 @@ enum mlxsw_afa_trapdisc_forward_action {
*/
MLXSW_ITEM32(afa, trapdisc, forward_action, 0x00, 0, 4);
+/* afa_trapdisc_trap_id
+ * Trap ID to configure.
+ */
+MLXSW_ITEM32(afa, trapdisc, trap_id, 0x04, 0, 9);
+
static inline void
mlxsw_afa_trapdisc_pack(char *payload,
- enum mlxsw_afa_trapdisc_forward_action forward_action)
+ enum mlxsw_afa_trapdisc_trap_action trap_action,
+ enum mlxsw_afa_trapdisc_forward_action forward_action,
+ u16 trap_id)
{
+ mlxsw_afa_trapdisc_trap_action_set(payload, trap_action);
mlxsw_afa_trapdisc_forward_action_set(payload, forward_action);
+ mlxsw_afa_trapdisc_trap_id_set(payload, trap_id);
}
int mlxsw_afa_block_append_drop(struct mlxsw_afa_block *block)
@@ -686,11 +706,27 @@ int mlxsw_afa_block_append_drop(struct mlxsw_afa_block *block)
if (!act)
return -ENOBUFS;
- mlxsw_afa_trapdisc_pack(act, MLXSW_AFA_TRAPDISC_FORWARD_ACTION_DISCARD);
+ mlxsw_afa_trapdisc_pack(act, MLXSW_AFA_TRAPDISC_TRAP_ACTION_NOP,
+ MLXSW_AFA_TRAPDISC_FORWARD_ACTION_DISCARD, 0);
return 0;
}
EXPORT_SYMBOL(mlxsw_afa_block_append_drop);
+int mlxsw_afa_block_append_trap(struct mlxsw_afa_block *block)
+{
+ char *act = mlxsw_afa_block_append_action(block,
+ MLXSW_AFA_TRAPDISC_CODE,
+ MLXSW_AFA_TRAPDISC_SIZE);
+
+ if (!act)
+ return -ENOBUFS;
+ mlxsw_afa_trapdisc_pack(act, MLXSW_AFA_TRAPDISC_TRAP_ACTION_TRAP,
+ MLXSW_AFA_TRAPDISC_FORWARD_ACTION_DISCARD,
+ MLXSW_TRAP_ID_ACL0);
+ return 0;
+}
+EXPORT_SYMBOL(mlxsw_afa_block_append_trap);
+
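Note: MLXSW_ITEM32(afa, trapdisc, <field>, offset, shift, width) declares a field 'width' bits wide starting at bit 'shift' of the big-endian 32-bit word at byte 'offset'. For illustration only, an open-coded equivalent of mlxsw_afa_trapdisc_pack() under that layout (the function name is hypothetical):

/* Illustration: open-coded trapdisc packing, assuming the MLXSW_ITEM32
 * layouts declared above and a zeroed action payload.
 */
static void trapdisc_pack_sketch(char *payload, u8 trap_action,
				 u8 forward_action, u16 trap_id)
{
	__be32 *p = (__be32 *) payload;

	/* word 0: trap_action in bits 27:24, forward_action in bits 3:0 */
	p[0] = cpu_to_be32(((u32) (trap_action & 0xf) << 24) |
			   (forward_action & 0xf));
	/* word 1: trap_id in bits 8:0 */
	p[1] = cpu_to_be32(trap_id & 0x1ff);
}
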
/* Forwarding Action
* -----------------
* Forwarding Action can be used to implement Policy Based Switching (PBS)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
index bd8b91d02880..f99c341b2497 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
@@ -60,6 +60,7 @@ u32 mlxsw_afa_block_first_set_kvdl_index(struct mlxsw_afa_block *block);
void mlxsw_afa_block_continue(struct mlxsw_afa_block *block);
void mlxsw_afa_block_jump(struct mlxsw_afa_block *block, u16 group_id);
int mlxsw_afa_block_append_drop(struct mlxsw_afa_block *block);
+int mlxsw_afa_block_append_trap(struct mlxsw_afa_block *block);
int mlxsw_afa_block_append_fwd(struct mlxsw_afa_block *block,
u8 local_port, bool in_port);
int mlxsw_afa_block_append_vlan_modify(struct mlxsw_afa_block *block,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
index c75e9141e3ec..9807ef814e42 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
@@ -56,6 +56,7 @@ enum mlxsw_afk_element {
MLXSW_AFK_ELEMENT_SRC_L4_PORT,
MLXSW_AFK_ELEMENT_VID,
MLXSW_AFK_ELEMENT_PCP,
+ MLXSW_AFK_ELEMENT_TCP_FLAGS,
MLXSW_AFK_ELEMENT_MAX,
};
@@ -102,6 +103,7 @@ static const struct mlxsw_afk_element_info mlxsw_afk_element_infos[] = {
MLXSW_AFK_ELEMENT_INFO_U32(IP_PROTO, 0x10, 0, 8),
MLXSW_AFK_ELEMENT_INFO_U32(VID, 0x10, 8, 12),
MLXSW_AFK_ELEMENT_INFO_U32(PCP, 0x10, 20, 3),
+ MLXSW_AFK_ELEMENT_INFO_U32(TCP_FLAGS, 0x10, 23, 9),
MLXSW_AFK_ELEMENT_INFO_U32(SRC_IP4, 0x18, 0, 32),
MLXSW_AFK_ELEMENT_INFO_U32(DST_IP4, 0x1C, 0, 32),
MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_HI, 0x18, 8),
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
index 0af3338bfcb4..a6441208e9d9 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
@@ -155,7 +155,7 @@ MLXSW_ITEM32(pci, cqe, byte_count, 0x04, 0, 14);
/* pci_cqe_trap_id
* Trap ID that captured the packet.
*/
-MLXSW_ITEM32(pci, cqe, trap_id, 0x08, 0, 8);
+MLXSW_ITEM32(pci, cqe, trap_id, 0x08, 0, 9);
/* pci_cqe_crc
* Length include CRC. Indicates the length field includes
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 83b277c8090e..1bd34d9a7b9e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -958,7 +958,7 @@ enum mlxsw_flood_table_type {
MLXSW_REG_SFGC_TABLE_TYPE_VID = 1,
MLXSW_REG_SFGC_TABLE_TYPE_SINGLE = 2,
MLXSW_REG_SFGC_TABLE_TYPE_ANY = 0,
- MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST = 3,
+ MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFSET = 3,
MLXSW_REG_SFGC_TABLE_TYPE_FID = 4,
};
@@ -5491,6 +5491,81 @@ static inline void mlxsw_reg_mtmp_unpack(char *payload, unsigned int *p_temp,
mlxsw_reg_mtmp_sensor_name_memcpy_from(payload, sensor_name);
}
+/* MCIA - Management Cable Info Access
+ * -----------------------------------
+ * The MCIA register is used to access the SFP+ and QSFP connectors' EEPROM.
+ */
+
+#define MLXSW_REG_MCIA_ID 0x9014
+#define MLXSW_REG_MCIA_LEN 0x40
+
+MLXSW_REG_DEFINE(mcia, MLXSW_REG_MCIA_ID, MLXSW_REG_MCIA_LEN);
+
+/* reg_mcia_l
+ * Lock bit. Setting this bit will lock the access to the specific
+ * cable. Used for updating a full page in a cable EEPROM. Any access
+ * other than subsequent writes will fail while the port is locked.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mcia, l, 0x00, 31, 1);
+
+/* reg_mcia_module
+ * Module number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mcia, module, 0x00, 16, 8);
+
+/* reg_mcia_status
+ * Module status.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mcia, status, 0x00, 0, 8);
+
+/* reg_mcia_i2c_device_address
+ * I2C device address.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mcia, i2c_device_address, 0x04, 24, 8);
+
+/* reg_mcia_page_number
+ * Page number.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mcia, page_number, 0x04, 16, 8);
+
+/* reg_mcia_device_address
+ * Device address.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mcia, device_address, 0x04, 0, 16);
+
+/* reg_mcia_size
+ * Number of bytes to read/write (up to 48 bytes).
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mcia, size, 0x08, 0, 16);
+
+#define MLXSW_SP_REG_MCIA_EEPROM_SIZE 48
+
+/* reg_mcia_eeprom
+ * Bytes to read/write.
+ * Access: RW
+ */
+MLXSW_ITEM_BUF(reg, mcia, eeprom, 0x10, MLXSW_SP_REG_MCIA_EEPROM_SIZE);
+
+static inline void mlxsw_reg_mcia_pack(char *payload, u8 module, u8 lock,
+ u8 page_number, u16 device_addr,
+ u8 size, u8 i2c_device_addr)
+{
+ MLXSW_REG_ZERO(mcia, payload);
+ mlxsw_reg_mcia_module_set(payload, module);
+ mlxsw_reg_mcia_l_set(payload, lock);
+ mlxsw_reg_mcia_page_number_set(payload, page_number);
+ mlxsw_reg_mcia_device_address_set(payload, device_addr);
+ mlxsw_reg_mcia_size_set(payload, size);
+ mlxsw_reg_mcia_i2c_device_address_set(payload, i2c_device_addr);
+}
+
/* MPAT - Monitoring Port Analyzer Table
* -------------------------------------
* MPAT Register is used to query and configure the Switch PortAnalyzer Table.
@@ -5643,6 +5718,222 @@ static inline void mlxsw_reg_mlcr_pack(char *payload, u8 local_port,
MLXSW_REG_MLCR_DURATION_MAX : 0);
}
+/* MCQI - Management Component Query Information
+ * ---------------------------------------------
+ * This register allows querying information about firmware components.
+ */
+#define MLXSW_REG_MCQI_ID 0x9061
+#define MLXSW_REG_MCQI_BASE_LEN 0x18
+#define MLXSW_REG_MCQI_CAP_LEN 0x14
+#define MLXSW_REG_MCQI_LEN (MLXSW_REG_MCQI_BASE_LEN + MLXSW_REG_MCQI_CAP_LEN)
+
+MLXSW_REG_DEFINE(mcqi, MLXSW_REG_MCQI_ID, MLXSW_REG_MCQI_LEN);
+
+/* reg_mcqi_component_index
+ * Index of the accessed component.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mcqi, component_index, 0x00, 0, 16);
+
+enum mlxfw_reg_mcqi_info_type {
+ MLXSW_REG_MCQI_INFO_TYPE_CAPABILITIES,
+};
+
+/* reg_mcqi_info_type
+ * Component properties set.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mcqi, info_type, 0x08, 0, 5);
+
+/* reg_mcqi_offset
+ * The requested/returned data offset from the section start, given in bytes.
+ * Must be DWORD aligned.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mcqi, offset, 0x10, 0, 32);
+
+/* reg_mcqi_data_size
+ * The requested/returned data size, given in bytes. If data_size is not DWORD
+ * aligned, the last bytes are zero padded.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mcqi, data_size, 0x14, 0, 16);
+
+/* reg_mcqi_cap_max_component_size
+ * Maximum size for this component, given in bytes.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mcqi, cap_max_component_size, 0x20, 0, 32);
+
+/* reg_mcqi_cap_log_mcda_word_size
+ * Log 2 of the access word size in bytes. Read and write access must be aligned
+ * to the word size. Write access must be done for an integer number of words.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mcqi, cap_log_mcda_word_size, 0x24, 28, 4);
+
+/* reg_mcqi_cap_mcda_max_write_size
+ * Maximum write size for the MCDA register.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mcqi, cap_mcda_max_write_size, 0x24, 0, 16);
+
+static inline void mlxsw_reg_mcqi_pack(char *payload, u16 component_index)
+{
+ MLXSW_REG_ZERO(mcqi, payload);
+ mlxsw_reg_mcqi_component_index_set(payload, component_index);
+ mlxsw_reg_mcqi_info_type_set(payload,
+ MLXSW_REG_MCQI_INFO_TYPE_CAPABILITIES);
+ mlxsw_reg_mcqi_offset_set(payload, 0);
+ mlxsw_reg_mcqi_data_size_set(payload, MLXSW_REG_MCQI_CAP_LEN);
+}
+
+static inline void mlxsw_reg_mcqi_unpack(char *payload,
+ u32 *p_cap_max_component_size,
+ u8 *p_cap_log_mcda_word_size,
+ u16 *p_cap_mcda_max_write_size)
+{
+ *p_cap_max_component_size =
+ mlxsw_reg_mcqi_cap_max_component_size_get(payload);
+ *p_cap_log_mcda_word_size =
+ mlxsw_reg_mcqi_cap_log_mcda_word_size_get(payload);
+ *p_cap_mcda_max_write_size =
+ mlxsw_reg_mcqi_cap_mcda_max_write_size_get(payload);
+}
+
+/* MCC - Management Component Control
+ * ----------------------------------
+ * Controls the firmware component and updates the FSM.
+ */
+#define MLXSW_REG_MCC_ID 0x9062
+#define MLXSW_REG_MCC_LEN 0x1C
+
+MLXSW_REG_DEFINE(mcc, MLXSW_REG_MCC_ID, MLXSW_REG_MCC_LEN);
+
+enum mlxsw_reg_mcc_instruction {
+ MLXSW_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE = 0x01,
+ MLXSW_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE = 0x02,
+ MLXSW_REG_MCC_INSTRUCTION_UPDATE_COMPONENT = 0x03,
+ MLXSW_REG_MCC_INSTRUCTION_VERIFY_COMPONENT = 0x04,
+ MLXSW_REG_MCC_INSTRUCTION_ACTIVATE = 0x06,
+ MLXSW_REG_MCC_INSTRUCTION_CANCEL = 0x08,
+};
+
+/* reg_mcc_instruction
+ * Command to be executed by the FSM.
+ * Applicable for write operation only.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mcc, instruction, 0x00, 0, 8);
+
+/* reg_mcc_component_index
+ * Index of the accessed component. Applicable only for commands that
+ * refer to components. Otherwise, this field is reserved.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mcc, component_index, 0x04, 0, 16);
+
+/* reg_mcc_update_handle
+ * Token representing the current flow executed by the FSM.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, mcc, update_handle, 0x08, 0, 24);
+
+/* reg_mcc_error_code
+ * Indicates whether the instruction completed successfully, or the reason
+ * it failed.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mcc, error_code, 0x0C, 8, 8);
+
+/* reg_mcc_control_state
+ * Current FSM state
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mcc, control_state, 0x0C, 0, 4);
+
+/* reg_mcc_component_size
+ * Component size in bytes. Valid for UPDATE_COMPONENT instruction. Specifying
+ * the size may shorten the update time. Value 0x0 means that size is
+ * unspecified.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, mcc, component_size, 0x10, 0, 32);
+
+static inline void mlxsw_reg_mcc_pack(char *payload,
+ enum mlxsw_reg_mcc_instruction instr,
+ u16 component_index, u32 update_handle,
+ u32 component_size)
+{
+ MLXSW_REG_ZERO(mcc, payload);
+ mlxsw_reg_mcc_instruction_set(payload, instr);
+ mlxsw_reg_mcc_component_index_set(payload, component_index);
+ mlxsw_reg_mcc_update_handle_set(payload, update_handle);
+ mlxsw_reg_mcc_component_size_set(payload, component_size);
+}
+
+static inline void mlxsw_reg_mcc_unpack(char *payload, u32 *p_update_handle,
+ u8 *p_error_code, u8 *p_control_state)
+{
+ if (p_update_handle)
+ *p_update_handle = mlxsw_reg_mcc_update_handle_get(payload);
+ if (p_error_code)
+ *p_error_code = mlxsw_reg_mcc_error_code_get(payload);
+ if (p_control_state)
+ *p_control_state = mlxsw_reg_mcc_control_state_get(payload);
+}
+
+/* MCDA - Management Component Data Access
+ * ---------------------------------------
+ * This register allows reading and writing a firmware component.
+ */
+#define MLXSW_REG_MCDA_ID 0x9063
+#define MLXSW_REG_MCDA_BASE_LEN 0x10
+#define MLXSW_REG_MCDA_MAX_DATA_LEN 0x80
+#define MLXSW_REG_MCDA_LEN \
+ (MLXSW_REG_MCDA_BASE_LEN + MLXSW_REG_MCDA_MAX_DATA_LEN)
+
+MLXSW_REG_DEFINE(mcda, MLXSW_REG_MCDA_ID, MLXSW_REG_MCDA_LEN);
+
+/* reg_mcda_update_handle
+ * Token representing the current flow executed by the FSM.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mcda, update_handle, 0x00, 0, 24);
+
+/* reg_mcda_offset
+ * Offset of the accessed address relative to the component start. Accesses
+ * must be in accordance with the log_mcda_word_size field in the MCQI
+ * register.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mcda, offset, 0x04, 0, 32);
+
+/* reg_mcda_size
+ * Size of the data accessed, given in bytes.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mcda, size, 0x08, 0, 16);
+
+/* reg_mcda_data
+ * Data block accessed.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, mcda, data, 0x10, 0, 32, 4, 0, false);
+
+static inline void mlxsw_reg_mcda_pack(char *payload, u32 update_handle,
+ u32 offset, u16 size, u8 *data)
+{
+ int i;
+
+ MLXSW_REG_ZERO(mcda, payload);
+ mlxsw_reg_mcda_update_handle_set(payload, update_handle);
+ mlxsw_reg_mcda_offset_set(payload, offset);
+ mlxsw_reg_mcda_size_set(payload, size);
+
+ for (i = 0; i < size / 4; i++)
+ mlxsw_reg_mcda_data_set(payload, i, *(u32 *) &data[i * 4]);
+}
+
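Note: MCQI, MCC and MCDA together form the component update interface that the mlxfw ops in the spectrum hunk below drive. A condensed sketch of the instruction ordering; error handling and the control_state polling that mlxfw performs between steps are elided, and the function name is hypothetical:

/* Sketch of a single component update via the MCC FSM and MCDA data
 * transfer; assumes 'size' is 4-byte aligned as mcda_pack() requires.
 */
static void mcc_update_flow_sketch(struct mlxsw_core *core, u32 handle,
				   u16 comp_idx, u8 *data, u32 size)
{
	char mcc_pl[MLXSW_REG_MCC_LEN];
	char mcda_pl[MLXSW_REG_MCDA_LEN];
	u32 off;

	mlxsw_reg_mcc_pack(mcc_pl,
			   MLXSW_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE,
			   0, handle, 0);
	mlxsw_reg_write(core, MLXSW_REG(mcc), mcc_pl);

	mlxsw_reg_mcc_pack(mcc_pl,
			   MLXSW_REG_MCC_INSTRUCTION_UPDATE_COMPONENT,
			   comp_idx, handle, size);
	mlxsw_reg_write(core, MLXSW_REG(mcc), mcc_pl);

	for (off = 0; off < size; off += MLXSW_REG_MCDA_MAX_DATA_LEN) {
		u16 chunk = min_t(u32, size - off,
				  MLXSW_REG_MCDA_MAX_DATA_LEN);

		mlxsw_reg_mcda_pack(mcda_pl, handle, off, chunk, data + off);
		mlxsw_reg_write(core, MLXSW_REG(mcda), mcda_pl);
	}

	mlxsw_reg_mcc_pack(mcc_pl,
			   MLXSW_REG_MCC_INSTRUCTION_VERIFY_COMPONENT,
			   comp_idx, handle, 0);
	mlxsw_reg_write(core, MLXSW_REG(mcc), mcc_pl);

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_ACTIVATE,
			   0, handle, 0);
	mlxsw_reg_write(core, MLXSW_REG(mcc), mcc_pl);

	mlxsw_reg_mcc_pack(mcc_pl,
			   MLXSW_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE,
			   0, handle, 0);
	mlxsw_reg_write(core, MLXSW_REG(mcc), mcc_pl);
}
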
/* MPSC - Monitoring Packet Sampling Configuration Register
* --------------------------------------------------------
* MPSC Register is used to configure the Packet Sampling mechanism.
@@ -6217,10 +6508,14 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(mfsl),
MLXSW_REG(mtcap),
MLXSW_REG(mtmp),
+ MLXSW_REG(mcia),
MLXSW_REG(mpat),
MLXSW_REG(mpar),
MLXSW_REG(mlcr),
MLXSW_REG(mpsc),
+ MLXSW_REG(mcqi),
+ MLXSW_REG(mcc),
+ MLXSW_REG(mcda),
MLXSW_REG(mgpc),
MLXSW_REG(sbpr),
MLXSW_REG(sbcm),
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 88357cee7679..60bf8f27cc00 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -68,6 +68,22 @@
#include "txheader.h"
#include "spectrum_cnt.h"
#include "spectrum_dpipe.h"
+#include "../mlxfw/mlxfw.h"
+
+#define MLXSW_FWREV_MAJOR 13
+#define MLXSW_FWREV_MINOR 1420
+#define MLXSW_FWREV_SUBMINOR 122
+
+static const struct mlxsw_fw_rev mlxsw_sp_supported_fw_rev = {
+ .major = MLXSW_FWREV_MAJOR,
+ .minor = MLXSW_FWREV_MINOR,
+ .subminor = MLXSW_FWREV_SUBMINOR
+};
+
+#define MLXSW_SP_FW_FILENAME \
+ "mellanox/mlxsw_spectrum-" __stringify(MLXSW_FWREV_MAJOR) \
+ "." __stringify(MLXSW_FWREV_MINOR) \
+ "." __stringify(MLXSW_FWREV_SUBMINOR) ".mfa2"
static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp_driver_version[] = "1.0";
@@ -140,6 +156,223 @@ MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);
*/
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
+struct mlxsw_sp_mlxfw_dev {
+ struct mlxfw_dev mlxfw_dev;
+ struct mlxsw_sp *mlxsw_sp;
+};
+
+static int mlxsw_sp_component_query(struct mlxfw_dev *mlxfw_dev,
+ u16 component_index, u32 *p_max_size,
+ u8 *p_align_bits, u16 *p_max_write_size)
+{
+ struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
+ container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
+ char mcqi_pl[MLXSW_REG_MCQI_LEN];
+ int err;
+
+ mlxsw_reg_mcqi_pack(mcqi_pl, component_index);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcqi), mcqi_pl);
+ if (err)
+ return err;
+ mlxsw_reg_mcqi_unpack(mcqi_pl, p_max_size, p_align_bits,
+ p_max_write_size);
+
+ *p_align_bits = max_t(u8, *p_align_bits, 2);
+ *p_max_write_size = min_t(u16, *p_max_write_size,
+ MLXSW_REG_MCDA_MAX_DATA_LEN);
+ return 0;
+}
+
+static int mlxsw_sp_fsm_lock(struct mlxfw_dev *mlxfw_dev, u32 *fwhandle)
+{
+ struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
+ container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
+ char mcc_pl[MLXSW_REG_MCC_LEN];
+ u8 control_state;
+ int err;
+
+ mlxsw_reg_mcc_pack(mcc_pl, 0, 0, 0, 0);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_mcc_unpack(mcc_pl, fwhandle, NULL, &control_state);
+ if (control_state != MLXFW_FSM_STATE_IDLE)
+ return -EBUSY;
+
+ mlxsw_reg_mcc_pack(mcc_pl,
+ MLXSW_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE,
+ 0, *fwhandle, 0);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
+}
+
+static int mlxsw_sp_fsm_component_update(struct mlxfw_dev *mlxfw_dev,
+ u32 fwhandle, u16 component_index,
+ u32 component_size)
+{
+ struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
+ container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
+ char mcc_pl[MLXSW_REG_MCC_LEN];
+
+ mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_UPDATE_COMPONENT,
+ component_index, fwhandle, component_size);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
+}
+
+static int mlxsw_sp_fsm_block_download(struct mlxfw_dev *mlxfw_dev,
+ u32 fwhandle, u8 *data, u16 size,
+ u32 offset)
+{
+ struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
+ container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
+ char mcda_pl[MLXSW_REG_MCDA_LEN];
+
+ mlxsw_reg_mcda_pack(mcda_pl, fwhandle, offset, size, data);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcda), mcda_pl);
+}
+
+static int mlxsw_sp_fsm_component_verify(struct mlxfw_dev *mlxfw_dev,
+ u32 fwhandle, u16 component_index)
+{
+ struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
+ container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
+ char mcc_pl[MLXSW_REG_MCC_LEN];
+
+ mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_VERIFY_COMPONENT,
+ component_index, fwhandle, 0);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
+}
+
+static int mlxsw_sp_fsm_activate(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
+{
+ struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
+ container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
+ char mcc_pl[MLXSW_REG_MCC_LEN];
+
+ mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_ACTIVATE, 0,
+ fwhandle, 0);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
+}
+
+static int mlxsw_sp_fsm_query_state(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
+ enum mlxfw_fsm_state *fsm_state,
+ enum mlxfw_fsm_state_err *fsm_state_err)
+{
+ struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
+ container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
+ char mcc_pl[MLXSW_REG_MCC_LEN];
+ u8 control_state;
+ u8 error_code;
+ int err;
+
+ mlxsw_reg_mcc_pack(mcc_pl, 0, 0, fwhandle, 0);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_mcc_unpack(mcc_pl, NULL, &error_code, &control_state);
+ *fsm_state = control_state;
+ *fsm_state_err = min_t(enum mlxfw_fsm_state_err, error_code,
+ MLXFW_FSM_STATE_ERR_MAX);
+ return 0;
+}
+
+static void mlxsw_sp_fsm_cancel(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
+{
+ struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
+ container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
+ char mcc_pl[MLXSW_REG_MCC_LEN];
+
+ mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_CANCEL, 0,
+ fwhandle, 0);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
+}
+
+static void mlxsw_sp_fsm_release(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
+{
+ struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
+ container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
+ char mcc_pl[MLXSW_REG_MCC_LEN];
+
+ mlxsw_reg_mcc_pack(mcc_pl,
+ MLXSW_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE, 0,
+ fwhandle, 0);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
+}
+
+static const struct mlxfw_dev_ops mlxsw_sp_mlxfw_dev_ops = {
+ .component_query = mlxsw_sp_component_query,
+ .fsm_lock = mlxsw_sp_fsm_lock,
+ .fsm_component_update = mlxsw_sp_fsm_component_update,
+ .fsm_block_download = mlxsw_sp_fsm_block_download,
+ .fsm_component_verify = mlxsw_sp_fsm_component_verify,
+ .fsm_activate = mlxsw_sp_fsm_activate,
+ .fsm_query_state = mlxsw_sp_fsm_query_state,
+ .fsm_cancel = mlxsw_sp_fsm_cancel,
+ .fsm_release = mlxsw_sp_fsm_release
+};
+
+static int mlxsw_sp_firmware_flash(struct mlxsw_sp *mlxsw_sp,
+ const struct firmware *firmware)
+{
+ struct mlxsw_sp_mlxfw_dev mlxsw_sp_mlxfw_dev = {
+ .mlxfw_dev = {
+ .ops = &mlxsw_sp_mlxfw_dev_ops,
+ .psid = mlxsw_sp->bus_info->psid,
+ .psid_size = strlen(mlxsw_sp->bus_info->psid),
+ },
+ .mlxsw_sp = mlxsw_sp
+ };
+
+ return mlxfw_firmware_flash(&mlxsw_sp_mlxfw_dev.mlxfw_dev, firmware);
+}
+
+static bool mlxsw_sp_fw_rev_ge(const struct mlxsw_fw_rev *a,
+ const struct mlxsw_fw_rev *b)
+{
+ if (a->major != b->major)
+ return a->major > b->major;
+ if (a->minor != b->minor)
+ return a->minor > b->minor;
+ return a->subminor >= b->subminor;
+}
+
+static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp)
+{
+ const struct mlxsw_fw_rev *rev = &mlxsw_sp->bus_info->fw_rev;
+ const struct firmware *firmware;
+ int err;
+
+ if (mlxsw_sp_fw_rev_ge(rev, &mlxsw_sp_supported_fw_rev))
+ return 0;
+
+ dev_info(mlxsw_sp->bus_info->dev, "The firmware version %d.%d.%d is out of date\n",
+ rev->major, rev->minor, rev->subminor);
+ dev_info(mlxsw_sp->bus_info->dev, "Upgrading firmware using file %s\n",
+ MLXSW_SP_FW_FILENAME);
+
+ err = request_firmware_direct(&firmware, MLXSW_SP_FW_FILENAME,
+ mlxsw_sp->bus_info->dev);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Could not request firmware file %s\n",
+ MLXSW_SP_FW_FILENAME);
+ return err;
+ }
+
+ err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware);
+ release_firmware(firmware);
+ return err;
+}
+
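Note: mlxsw_sp_fw_rev_ge() is a plain lexicographic compare, so against the supported revision 13.1420.122 a device at 13.1420.121 triggers the flash above while 13.1421.0 passes. A self-contained check of that behaviour (hypothetical test scaffolding):

/* Hypothetical sanity checks for the revision compare above. */
static void mlxsw_sp_fw_rev_ge_selftest(void)
{
	const struct mlxsw_fw_rev older = {
		.major = 13, .minor = 1420, .subminor = 121
	};
	const struct mlxsw_fw_rev newer = {
		.major = 13, .minor = 1421, .subminor = 0
	};

	WARN_ON(mlxsw_sp_fw_rev_ge(&older, &mlxsw_sp_supported_fw_rev));
	WARN_ON(!mlxsw_sp_fw_rev_ge(&newer, &mlxsw_sp_supported_fw_rev));
}
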
int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
unsigned int counter_index, u64 *packets,
u64 *bytes)
@@ -210,6 +443,41 @@ static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
}
+int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
+ u8 state)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ enum mlxsw_reg_spms_state spms_state;
+ char *spms_pl;
+ int err;
+
+ switch (state) {
+ case BR_STATE_FORWARDING:
+ spms_state = MLXSW_REG_SPMS_STATE_FORWARDING;
+ break;
+ case BR_STATE_LEARNING:
+ spms_state = MLXSW_REG_SPMS_STATE_LEARNING;
+ break;
+ case BR_STATE_LISTENING: /* fall-through */
+ case BR_STATE_DISABLED: /* fall-through */
+ case BR_STATE_BLOCKING:
+ spms_state = MLXSW_REG_SPMS_STATE_DISCARDING;
+ break;
+ default:
+ BUG();
+ }
+
+ spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
+ if (!spms_pl)
+ return -ENOMEM;
+ mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
+ mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
+
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
+ kfree(spms_pl);
+ return err;
+}
+
static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
{
char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
@@ -592,25 +860,16 @@ static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
}
-static int __mlxsw_sp_port_swid_set(struct mlxsw_sp *mlxsw_sp, u8 local_port,
- u8 swid)
-{
- char pspa_pl[MLXSW_REG_PSPA_LEN];
-
- mlxsw_reg_pspa_pack(pspa_pl, swid, local_port);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
-}
-
static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char pspa_pl[MLXSW_REG_PSPA_LEN];
- return __mlxsw_sp_port_swid_set(mlxsw_sp, mlxsw_sp_port->local_port,
- swid);
+ mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
}
-static int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
- bool enable)
+int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
char svpe_pl[MLXSW_REG_SVPE_LEN];
@@ -619,21 +878,8 @@ static int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
}
-int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port,
- enum mlxsw_reg_svfa_mt mt, bool valid, u16 fid,
- u16 vid)
-{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- char svfa_pl[MLXSW_REG_SVFA_LEN];
-
- mlxsw_reg_svfa_pack(svfa_pl, mlxsw_sp_port->local_port, mt, valid,
- fid, vid);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
-}
-
-int __mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
- u16 vid_begin, u16 vid_end,
- bool learn_enable)
+int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
+ bool learn_enable)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
char *spvmlr_pl;
@@ -642,18 +888,56 @@ int __mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
if (!spvmlr_pl)
return -ENOMEM;
- mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid_begin,
- vid_end, learn_enable);
+ mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
+ learn_enable);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
kfree(spvmlr_pl);
return err;
}
-static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
- u16 vid, bool learn_enable)
+static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 vid)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char spvid_pl[MLXSW_REG_SPVID_LEN];
+
+ mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
+}
+
+static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ bool allow)
{
- return __mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, vid,
- learn_enable);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char spaft_pl[MLXSW_REG_SPAFT_LEN];
+
+ mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl);
+}
+
+int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
+{
+ int err;
+
+ if (!vid) {
+ err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false);
+ if (err)
+ return err;
+ } else {
+ err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid);
+ if (err)
+ return err;
+ err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, true);
+ if (err)
+ goto err_port_allow_untagged_set;
+ }
+
+ mlxsw_sp_port->pvid = vid;
+ return 0;
+
+err_port_allow_untagged_set:
+ __mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid);
+ return err;
}
static int
@@ -683,13 +967,14 @@ static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
return 0;
}
-static int mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+static int mlxsw_sp_port_module_map(struct mlxsw_sp_port *mlxsw_sp_port,
u8 module, u8 width, u8 lane)
{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
char pmlp_pl[MLXSW_REG_PMLP_LEN];
int i;
- mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
+ mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
mlxsw_reg_pmlp_width_set(pmlp_pl, width);
for (i = 0; i < width; i++) {
mlxsw_reg_pmlp_module_set(pmlp_pl, i, module);
@@ -699,11 +984,12 @@ static int mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u8 local_port,
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}
-static int mlxsw_sp_port_module_unmap(struct mlxsw_sp *mlxsw_sp, u8 local_port)
+static int mlxsw_sp_port_module_unmap(struct mlxsw_sp_port *mlxsw_sp_port)
{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
char pmlp_pl[MLXSW_REG_PMLP_LEN];
- mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
+ mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}
@@ -1100,95 +1386,82 @@ int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
return 0;
}
-static int mlxsw_sp_port_vp_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
+static void mlxsw_sp_port_vlan_flush(struct mlxsw_sp_port *mlxsw_sp_port)
{
- enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
- u16 vid, last_visited_vid;
- int err;
-
- for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
- err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, true, vid,
- vid);
- if (err) {
- last_visited_vid = vid;
- goto err_port_vid_to_fid_set;
- }
- }
-
- err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
- if (err) {
- last_visited_vid = VLAN_N_VID;
- goto err_port_vid_to_fid_set;
- }
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, *tmp;
- return 0;
-
-err_port_vid_to_fid_set:
- for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid)
- mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, vid,
- vid);
- return err;
+ list_for_each_entry_safe(mlxsw_sp_port_vlan, tmp,
+ &mlxsw_sp_port->vlans_list, list)
+ mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
}
-static int mlxsw_sp_port_vlan_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
+static struct mlxsw_sp_port_vlan *
+mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
- enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
- u16 vid;
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
+ bool untagged = vid == 1;
int err;
- err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
+ err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, untagged);
if (err)
- return err;
+ return ERR_PTR(err);
- for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
- err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false,
- vid, vid);
- if (err)
- return err;
+ mlxsw_sp_port_vlan = kzalloc(sizeof(*mlxsw_sp_port_vlan), GFP_KERNEL);
+ if (!mlxsw_sp_port_vlan) {
+ err = -ENOMEM;
+ goto err_port_vlan_alloc;
}
- return 0;
+ mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port;
+ mlxsw_sp_port_vlan->vid = vid;
+ list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list);
+
+ return mlxsw_sp_port_vlan;
+
+err_port_vlan_alloc:
+ mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
+ return ERR_PTR(err);
}
-static struct mlxsw_sp_port *
-mlxsw_sp_port_vport_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
+static void
+mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
- struct mlxsw_sp_port *mlxsw_sp_vport;
+ struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
+ u16 vid = mlxsw_sp_port_vlan->vid;
- mlxsw_sp_vport = kzalloc(sizeof(*mlxsw_sp_vport), GFP_KERNEL);
- if (!mlxsw_sp_vport)
- return NULL;
+ list_del(&mlxsw_sp_port_vlan->list);
+ kfree(mlxsw_sp_port_vlan);
+ mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
+}
- /* dev will be set correctly after the VLAN device is linked
- * with the real device. In case of bridge SELF invocation, dev
- * will remain as is.
- */
- mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
- mlxsw_sp_vport->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- mlxsw_sp_vport->local_port = mlxsw_sp_port->local_port;
- mlxsw_sp_vport->stp_state = BR_STATE_FORWARDING;
- mlxsw_sp_vport->lagged = mlxsw_sp_port->lagged;
- mlxsw_sp_vport->lag_id = mlxsw_sp_port->lag_id;
- mlxsw_sp_vport->vport.vid = vid;
+struct mlxsw_sp_port_vlan *
+mlxsw_sp_port_vlan_get(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
+{
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
- list_add(&mlxsw_sp_vport->vport.list, &mlxsw_sp_port->vports_list);
+ mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
+ if (mlxsw_sp_port_vlan)
+ return mlxsw_sp_port_vlan;
- return mlxsw_sp_vport;
+ return mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid);
}
-static void mlxsw_sp_port_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_vport)
+void mlxsw_sp_port_vlan_put(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
- list_del(&mlxsw_sp_vport->vport.list);
- kfree(mlxsw_sp_vport);
+ struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
+
+ if (mlxsw_sp_port_vlan->bridge_port)
+ mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
+ else if (fid)
+ mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
+
+ mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
}
static int mlxsw_sp_port_add_vid(struct net_device *dev,
__be16 __always_unused proto, u16 vid)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
- struct mlxsw_sp_port *mlxsw_sp_vport;
- bool untagged = vid == 1;
- int err;
/* VLAN 0 is added to HW filter when device goes up, but it is
* reserved in our case, so simply return.
@@ -1196,43 +1469,14 @@ static int mlxsw_sp_port_add_vid(struct net_device *dev,
if (!vid)
return 0;
- if (mlxsw_sp_port_vport_find(mlxsw_sp_port, vid))
- return 0;
-
- mlxsw_sp_vport = mlxsw_sp_port_vport_create(mlxsw_sp_port, vid);
- if (!mlxsw_sp_vport)
- return -ENOMEM;
-
- /* When adding the first VLAN interface on a bridged port we need to
- * transition all the active 802.1Q bridge VLANs to use explicit
- * {Port, VID} to FID mappings and set the port's mode to Virtual mode.
- */
- if (list_is_singular(&mlxsw_sp_port->vports_list)) {
- err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port);
- if (err)
- goto err_port_vp_mode_trans;
- }
-
- err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, true, untagged);
- if (err)
- goto err_port_add_vid;
-
- return 0;
-
-err_port_add_vid:
- if (list_is_singular(&mlxsw_sp_port->vports_list))
- mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
-err_port_vp_mode_trans:
- mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);
- return err;
+ return PTR_ERR_OR_ZERO(mlxsw_sp_port_vlan_get(mlxsw_sp_port, vid));
}
static int mlxsw_sp_port_kill_vid(struct net_device *dev,
__be16 __always_unused proto, u16 vid)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
- struct mlxsw_sp_port *mlxsw_sp_vport;
- struct mlxsw_sp_fid *f;
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
/* VLAN 0 is removed from HW filter when device goes down, but
* it is reserved in our case, so simply return.
@@ -1240,27 +1484,10 @@ static int mlxsw_sp_port_kill_vid(struct net_device *dev,
if (!vid)
return 0;
- mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
- if (WARN_ON(!mlxsw_sp_vport))
+ mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
+ if (!mlxsw_sp_port_vlan)
return 0;
-
- mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);
-
- /* Drop FID reference. If this was the last reference the
- * resources will be freed.
- */
- f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
- if (f && !WARN_ON(!f->leave))
- f->leave(mlxsw_sp_vport);
-
- /* When removing the last VLAN interface on a bridged port we need to
- * transition all active 802.1Q bridge VLANs to use VID to FID
- * mappings and set port's mode to VLAN mode.
- */
- if (list_is_singular(&mlxsw_sp_port->vports_list))
- mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
-
- mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);
+ mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
return 0;
}
@@ -1466,11 +1693,15 @@ static void mlxsw_sp_port_del_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
}
static int mlxsw_sp_setup_tc(struct net_device *dev, u32 handle,
- __be16 proto, struct tc_to_netdev *tc)
+ u32 chain_index, __be16 proto,
+ struct tc_to_netdev *tc)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
bool ingress = TC_H_MAJ(handle) == TC_H_MAJ(TC_H_INGRESS);
+ if (chain_index)
+ return -EOPNOTSUPP;
+
switch (tc->type) {
case TC_SETUP_MATCHALL:
switch (tc->cls_mall->command) {
@@ -1519,12 +1750,6 @@ static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
.ndo_get_offload_stats = mlxsw_sp_port_get_offload_stats,
.ndo_vlan_rx_add_vid = mlxsw_sp_port_add_vid,
.ndo_vlan_rx_kill_vid = mlxsw_sp_port_kill_vid,
- .ndo_fdb_add = switchdev_port_fdb_add,
- .ndo_fdb_del = switchdev_port_fdb_del,
- .ndo_fdb_dump = switchdev_port_fdb_dump,
- .ndo_bridge_setlink = switchdev_port_bridge_setlink,
- .ndo_bridge_getlink = switchdev_port_bridge_getlink,
- .ndo_bridge_dellink = switchdev_port_bridge_dellink,
.ndo_get_phys_port_name = mlxsw_sp_port_get_phys_port_name,
};
@@ -2269,6 +2494,160 @@ mlxsw_sp_port_set_link_ksettings(struct net_device *dev,
return 0;
}
+static int mlxsw_sp_flash_device(struct net_device *dev,
+ struct ethtool_flash *flash)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ const struct firmware *firmware;
+ int err;
+
+ if (flash->region != ETHTOOL_FLASH_ALL_REGIONS)
+ return -EOPNOTSUPP;
+
+ dev_hold(dev);
+ rtnl_unlock();
+
+ err = request_firmware_direct(&firmware, flash->data, &dev->dev);
+ if (err)
+ goto out;
+ err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware);
+ release_firmware(firmware);
+out:
+ rtnl_lock();
+ dev_put(dev);
+ return err;
+}
+
+#define MLXSW_SP_QSFP_I2C_ADDR 0x50
+
+static int mlxsw_sp_query_module_eeprom(struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 offset, u16 size, void *data,
+ unsigned int *p_read_size)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char eeprom_tmp[MLXSW_SP_REG_MCIA_EEPROM_SIZE];
+ char mcia_pl[MLXSW_REG_MCIA_LEN];
+ int status;
+ int err;
+
+ size = min_t(u16, size, MLXSW_SP_REG_MCIA_EEPROM_SIZE);
+ mlxsw_reg_mcia_pack(mcia_pl, mlxsw_sp_port->mapping.module,
+ 0, 0, offset, size, MLXSW_SP_QSFP_I2C_ADDR);
+
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcia), mcia_pl);
+ if (err)
+ return err;
+
+ status = mlxsw_reg_mcia_status_get(mcia_pl);
+ if (status)
+ return -EIO;
+
+ mlxsw_reg_mcia_eeprom_memcpy_from(mcia_pl, eeprom_tmp);
+ memcpy(data, eeprom_tmp, size);
+ *p_read_size = size;
+
+ return 0;
+}
+
+enum mlxsw_sp_eeprom_module_info_rev_id {
+ MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_UNSPC = 0x00,
+ MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_8436 = 0x01,
+ MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_8636 = 0x03,
+};
+
+enum mlxsw_sp_eeprom_module_info_id {
+ MLXSW_SP_EEPROM_MODULE_INFO_ID_SFP = 0x03,
+ MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP = 0x0C,
+ MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP_PLUS = 0x0D,
+ MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP28 = 0x11,
+};
+
+enum mlxsw_sp_eeprom_module_info {
+ MLXSW_SP_EEPROM_MODULE_INFO_ID,
+ MLXSW_SP_EEPROM_MODULE_INFO_REV_ID,
+ MLXSW_SP_EEPROM_MODULE_INFO_SIZE,
+};
+
+static int mlxsw_sp_get_module_info(struct net_device *netdev,
+ struct ethtool_modinfo *modinfo)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev);
+ u8 module_info[MLXSW_SP_EEPROM_MODULE_INFO_SIZE];
+ u8 module_rev_id, module_id;
+ unsigned int read_size;
+ int err;
+
+ err = mlxsw_sp_query_module_eeprom(mlxsw_sp_port, 0,
+ MLXSW_SP_EEPROM_MODULE_INFO_SIZE,
+ module_info, &read_size);
+ if (err)
+ return err;
+
+ if (read_size < MLXSW_SP_EEPROM_MODULE_INFO_SIZE)
+ return -EIO;
+
+ module_rev_id = module_info[MLXSW_SP_EEPROM_MODULE_INFO_REV_ID];
+ module_id = module_info[MLXSW_SP_EEPROM_MODULE_INFO_ID];
+
+ switch (module_id) {
+ case MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP:
+ modinfo->type = ETH_MODULE_SFF_8436;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
+ break;
+ case MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP_PLUS:
+ case MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP28:
+ if (module_id == MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP28 ||
+ module_rev_id >= MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_8636) {
+ modinfo->type = ETH_MODULE_SFF_8636;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
+ } else {
+ modinfo->type = ETH_MODULE_SFF_8436;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
+ }
+ break;
+ case MLXSW_SP_EEPROM_MODULE_INFO_ID_SFP:
+ modinfo->type = ETH_MODULE_SFF_8472;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int mlxsw_sp_get_module_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *ee,
+ u8 *data)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev);
+ int offset = ee->offset;
+ unsigned int read_size;
+ int i = 0;
+ int err;
+
+ if (!ee->len)
+ return -EINVAL;
+
+ memset(data, 0, ee->len);
+
+ while (i < ee->len) {
+ err = mlxsw_sp_query_module_eeprom(mlxsw_sp_port, offset,
+ ee->len - i, data + i,
+ &read_size);
+ if (err) {
+ netdev_err(mlxsw_sp_port->dev, "EEPROM query failed\n");
+ return err;
+ }
+
+ i += read_size;
+ offset += read_size;
+ }
+
+ return 0;
+}
+
static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
.get_drvinfo = mlxsw_sp_port_get_drvinfo,
.get_link = ethtool_op_get_link,
@@ -2280,6 +2659,9 @@ static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
.get_sset_count = mlxsw_sp_port_get_sset_count,
.get_link_ksettings = mlxsw_sp_port_get_link_ksettings,
.set_link_ksettings = mlxsw_sp_port_set_link_ksettings,
+ .flash_device = mlxsw_sp_flash_device,
+ .get_module_info = mlxsw_sp_get_module_info,
+ .get_module_eeprom = mlxsw_sp_get_module_eeprom,
};
static int
@@ -2398,51 +2780,38 @@ static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
return 0;
}
-static int mlxsw_sp_port_pvid_vport_create(struct mlxsw_sp_port *mlxsw_sp_port)
-{
- mlxsw_sp_port->pvid = 1;
-
- return mlxsw_sp_port_add_vid(mlxsw_sp_port->dev, 0, 1);
-}
-
-static int mlxsw_sp_port_pvid_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_port)
-{
- return mlxsw_sp_port_kill_vid(mlxsw_sp_port->dev, 0, 1);
-}
-
-static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
- bool split, u8 module, u8 width, u8 lane)
+static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+ bool split, u8 module, u8 width, u8 lane)
{
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
struct mlxsw_sp_port *mlxsw_sp_port;
struct net_device *dev;
- size_t bytes;
int err;
+ err = mlxsw_core_port_init(mlxsw_sp->core, local_port);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
+ local_port);
+ return err;
+ }
+
dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
- if (!dev)
- return -ENOMEM;
+ if (!dev) {
+ err = -ENOMEM;
+ goto err_alloc_etherdev;
+ }
SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev);
mlxsw_sp_port = netdev_priv(dev);
mlxsw_sp_port->dev = dev;
mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
mlxsw_sp_port->local_port = local_port;
+ mlxsw_sp_port->pvid = 1;
mlxsw_sp_port->split = split;
mlxsw_sp_port->mapping.module = module;
mlxsw_sp_port->mapping.width = width;
mlxsw_sp_port->mapping.lane = lane;
mlxsw_sp_port->link.autoneg = 1;
- bytes = DIV_ROUND_UP(VLAN_N_VID, BITS_PER_BYTE);
- mlxsw_sp_port->active_vlans = kzalloc(bytes, GFP_KERNEL);
- if (!mlxsw_sp_port->active_vlans) {
- err = -ENOMEM;
- goto err_port_active_vlans_alloc;
- }
- mlxsw_sp_port->untagged_vlans = kzalloc(bytes, GFP_KERNEL);
- if (!mlxsw_sp_port->untagged_vlans) {
- err = -ENOMEM;
- goto err_port_untagged_vlans_alloc;
- }
- INIT_LIST_HEAD(&mlxsw_sp_port->vports_list);
+ INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list);
INIT_LIST_HEAD(&mlxsw_sp_port->mall_tc_list);
mlxsw_sp_port->pcpu_stats =
@@ -2472,6 +2841,13 @@ static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;
+ err = mlxsw_sp_port_module_map(mlxsw_sp_port, module, width, lane);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to map module\n",
+ mlxsw_sp_port->local_port);
+ goto err_port_module_map;
+ }
+
err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
@@ -2547,11 +2923,18 @@ static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
goto err_port_dcb_init;
}
- err = mlxsw_sp_port_pvid_vport_create(mlxsw_sp_port);
+ err = mlxsw_sp_port_fids_init(mlxsw_sp_port);
if (err) {
- dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create PVID vPort\n",
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize FIDs\n",
mlxsw_sp_port->local_port);
- goto err_port_pvid_vport_create;
+ goto err_port_fids_init;
+ }
+
+ mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_get(mlxsw_sp_port, 1);
+ if (IS_ERR(mlxsw_sp_port_vlan)) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n",
+ mlxsw_sp_port->local_port);
+ goto err_port_vlan_get;
}
mlxsw_sp_port_switchdev_init(mlxsw_sp_port);
@@ -2572,8 +2955,10 @@ static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
err_register_netdev:
mlxsw_sp->ports[local_port] = NULL;
mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
- mlxsw_sp_port_pvid_vport_destroy(mlxsw_sp_port);
-err_port_pvid_vport_create:
+ mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
+err_port_vlan_get:
+ mlxsw_sp_port_fids_fini(mlxsw_sp_port);
+err_port_fids_init:
mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
err_port_dcb_init:
err_port_ets_init:
@@ -2585,43 +2970,21 @@ err_port_system_port_mapping_set:
err_dev_addr_init:
mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
err_port_swid_set:
+ mlxsw_sp_port_module_unmap(mlxsw_sp_port);
+err_port_module_map:
kfree(mlxsw_sp_port->hw_stats.cache);
err_alloc_hw_stats:
kfree(mlxsw_sp_port->sample);
err_alloc_sample:
free_percpu(mlxsw_sp_port->pcpu_stats);
err_alloc_stats:
- kfree(mlxsw_sp_port->untagged_vlans);
-err_port_untagged_vlans_alloc:
- kfree(mlxsw_sp_port->active_vlans);
-err_port_active_vlans_alloc:
free_netdev(dev);
- return err;
-}
-
-static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
- bool split, u8 module, u8 width, u8 lane)
-{
- int err;
-
- err = mlxsw_core_port_init(mlxsw_sp->core, local_port);
- if (err) {
- dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
- local_port);
- return err;
- }
- err = __mlxsw_sp_port_create(mlxsw_sp, local_port, split,
- module, width, lane);
- if (err)
- goto err_port_create;
- return 0;
-
-err_port_create:
+err_alloc_etherdev:
mlxsw_core_port_fini(mlxsw_sp->core, local_port);
return err;
}
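
mlxsw_sp_port_create() now folds the old two-level wrappers into a single function, so its error path is one unwind ladder: each goto label undoes exactly the steps that succeeded before the failure, in reverse order. The shape of the idiom, reduced to a sketch with hypothetical step_/undo_ helpers:

static int example_create(void)
{
	int err;

	err = step_a();		/* hypothetical helpers throughout */
	if (err)
		return err;

	err = step_b();
	if (err)
		goto err_step_b;

	err = step_c();
	if (err)
		goto err_step_c;

	return 0;

err_step_c:
	undo_b();	/* label for the failing step undoes the prior one */
err_step_b:
	undo_a();
	return err;
}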
-static void __mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
+static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
@@ -2630,22 +2993,16 @@ static void __mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
mlxsw_sp->ports[local_port] = NULL;
mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
- mlxsw_sp_port_pvid_vport_destroy(mlxsw_sp_port);
+ mlxsw_sp_port_vlan_flush(mlxsw_sp_port);
+ mlxsw_sp_port_fids_fini(mlxsw_sp_port);
mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
- mlxsw_sp_port_module_unmap(mlxsw_sp, mlxsw_sp_port->local_port);
+ mlxsw_sp_port_module_unmap(mlxsw_sp_port);
kfree(mlxsw_sp_port->hw_stats.cache);
kfree(mlxsw_sp_port->sample);
free_percpu(mlxsw_sp_port->pcpu_stats);
- kfree(mlxsw_sp_port->untagged_vlans);
- kfree(mlxsw_sp_port->active_vlans);
- WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vports_list));
+ WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list));
free_netdev(mlxsw_sp_port->dev);
-}
-
-static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
-{
- __mlxsw_sp_port_remove(mlxsw_sp, local_port);
mlxsw_core_port_fini(mlxsw_sp->core, local_port);
}
@@ -2724,19 +3081,6 @@ static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
int err, i;
for (i = 0; i < count; i++) {
- err = mlxsw_sp_port_module_map(mlxsw_sp, base_port + i, module,
- width, i * width);
- if (err)
- goto err_port_module_map;
- }
-
- for (i = 0; i < count; i++) {
- err = __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i, 0);
- if (err)
- goto err_port_swid_set;
- }
-
- for (i = 0; i < count; i++) {
err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true,
module, width, i * width);
if (err)
@@ -2749,15 +3093,6 @@ err_port_create:
for (i--; i >= 0; i--)
if (mlxsw_sp_port_created(mlxsw_sp, base_port + i))
mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
- i = count;
-err_port_swid_set:
- for (i--; i >= 0; i--)
- __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i,
- MLXSW_PORT_SWID_DISABLED_PORT);
- i = count;
-err_port_module_map:
- for (i--; i >= 0; i--)
- mlxsw_sp_port_module_unmap(mlxsw_sp, base_port + i);
return err;
}
@@ -2776,17 +3111,6 @@ static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
local_port = base_port + i * 2;
module = mlxsw_sp->port_to_module[local_port];
- mlxsw_sp_port_module_map(mlxsw_sp, local_port, module, width,
- 0);
- }
-
- for (i = 0; i < count; i++)
- __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i * 2, 0);
-
- for (i = 0; i < count; i++) {
- local_port = base_port + i * 2;
- module = mlxsw_sp->port_to_module[local_port];
-
mlxsw_sp_port_create(mlxsw_sp, local_port, false, module,
width, 0);
}
@@ -3020,7 +3344,9 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = {
MLXSW_SP_RXL_NO_MARK(BGP_IPV4, TRAP_TO_CPU, BGP_IPV4, false),
/* PKT Sample trap */
MLXSW_RXL(mlxsw_sp_rx_listener_sample_func, PKT_SAMPLE, MIRROR_TO_CPU,
- false, SP_IP2ME, DISCARD)
+ false, SP_IP2ME, DISCARD),
+ /* ACL trap */
+ MLXSW_SP_RXL_NO_MARK(ACL0, TRAP_TO_CPU, IP2ME, false),
};
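
The new ACL0 entry makes packets hit by the trap action (added to the ACL code later in this patch) surface at the CPU through the same listener table as the other traps. Roughly, each MLXSW_SP_RXL_* macro binds a trap ID to a receive handler of this shape; the handler below is a hypothetical sketch, the real ones are defined elsewhere in spectrum.c:

static void example_rx_listener_func(struct sk_buff *skb, u8 local_port,
				     void *priv)
{
	/* priv is the mlxsw_sp instance; a handler typically fixes up
	 * skb metadata and hands the packet to the stack, e.g. via
	 * netif_receive_skb().
	 */
}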
static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
@@ -3192,57 +3518,6 @@ static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
}
}
-static int __mlxsw_sp_flood_init(struct mlxsw_core *mlxsw_core,
- enum mlxsw_reg_sfgc_type type,
- enum mlxsw_reg_sfgc_bridge_type bridge_type)
-{
- enum mlxsw_flood_table_type table_type;
- enum mlxsw_sp_flood_table flood_table;
- char sfgc_pl[MLXSW_REG_SFGC_LEN];
-
- if (bridge_type == MLXSW_REG_SFGC_BRIDGE_TYPE_VFID)
- table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID;
- else
- table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
-
- switch (type) {
- case MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST:
- flood_table = MLXSW_SP_FLOOD_TABLE_UC;
- break;
- case MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4:
- flood_table = MLXSW_SP_FLOOD_TABLE_MC;
- break;
- default:
- flood_table = MLXSW_SP_FLOOD_TABLE_BC;
- }
-
- mlxsw_reg_sfgc_pack(sfgc_pl, type, bridge_type, table_type,
- flood_table);
- return mlxsw_reg_write(mlxsw_core, MLXSW_REG(sfgc), sfgc_pl);
-}
-
-static int mlxsw_sp_flood_init(struct mlxsw_sp *mlxsw_sp)
-{
- int type, err;
-
- for (type = 0; type < MLXSW_REG_SFGC_TYPE_MAX; type++) {
- if (type == MLXSW_REG_SFGC_TYPE_RESERVED)
- continue;
-
- err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
- MLXSW_REG_SFGC_BRIDGE_TYPE_VFID);
- if (err)
- return err;
-
- err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
- MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID);
- if (err)
- return err;
- }
-
- return 0;
-}
-
static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
{
char slcr_pl[MLXSW_REG_SLCR_LEN];
@@ -3290,18 +3565,6 @@ static int mlxsw_sp_basic_trap_groups_set(struct mlxsw_core *mlxsw_core)
return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
}
-static int mlxsw_sp_vfid_op(struct mlxsw_sp *mlxsw_sp, u16 fid, bool create);
-
-static int mlxsw_sp_dummy_fid_init(struct mlxsw_sp *mlxsw_sp)
-{
- return mlxsw_sp_vfid_op(mlxsw_sp, MLXSW_SP_DUMMY_FID, true);
-}
-
-static void mlxsw_sp_dummy_fid_fini(struct mlxsw_sp *mlxsw_sp)
-{
- mlxsw_sp_vfid_op(mlxsw_sp, MLXSW_SP_DUMMY_FID, false);
-}
-
static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
const struct mlxsw_bus_info *mlxsw_bus_info)
{
@@ -3310,9 +3573,12 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->core = mlxsw_core;
mlxsw_sp->bus_info = mlxsw_bus_info;
- INIT_LIST_HEAD(&mlxsw_sp->fids);
- INIT_LIST_HEAD(&mlxsw_sp->vfids.list);
- INIT_LIST_HEAD(&mlxsw_sp->br_mids.list);
+
+ err = mlxsw_sp_fw_rev_validate(mlxsw_sp);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Could not upgrade firmware\n");
+ return err;
+ }
err = mlxsw_sp_base_mac_get(mlxsw_sp);
if (err) {
@@ -3320,16 +3586,16 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
return err;
}
- err = mlxsw_sp_traps_init(mlxsw_sp);
+ err = mlxsw_sp_fids_init(mlxsw_sp);
if (err) {
- dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n");
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize FIDs\n");
return err;
}
- err = mlxsw_sp_flood_init(mlxsw_sp);
+ err = mlxsw_sp_traps_init(mlxsw_sp);
if (err) {
- dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize flood tables\n");
- goto err_flood_init;
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n");
+ goto err_traps_init;
}
err = mlxsw_sp_buffers_init(mlxsw_sp);
@@ -3380,12 +3646,6 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
goto err_dpipe_init;
}
- err = mlxsw_sp_dummy_fid_init(mlxsw_sp);
- if (err) {
- dev_err(mlxsw_sp->bus_info->dev, "Failed to init dummy FID\n");
- goto err_dummy_fid_init;
- }
-
err = mlxsw_sp_ports_create(mlxsw_sp);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
@@ -3395,8 +3655,6 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
return 0;
err_ports_create:
- mlxsw_sp_dummy_fid_fini(mlxsw_sp);
-err_dummy_fid_init:
mlxsw_sp_dpipe_fini(mlxsw_sp);
err_dpipe_init:
mlxsw_sp_counter_pool_fini(mlxsw_sp);
@@ -3413,8 +3671,9 @@ err_switchdev_init:
err_lag_init:
mlxsw_sp_buffers_fini(mlxsw_sp);
err_buffers_init:
-err_flood_init:
mlxsw_sp_traps_fini(mlxsw_sp);
+err_traps_init:
+ mlxsw_sp_fids_fini(mlxsw_sp);
return err;
}
@@ -3423,7 +3682,6 @@ static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
mlxsw_sp_ports_remove(mlxsw_sp);
- mlxsw_sp_dummy_fid_fini(mlxsw_sp);
mlxsw_sp_dpipe_fini(mlxsw_sp);
mlxsw_sp_counter_pool_fini(mlxsw_sp);
mlxsw_sp_acl_fini(mlxsw_sp);
@@ -3433,8 +3691,7 @@ static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
mlxsw_sp_lag_fini(mlxsw_sp);
mlxsw_sp_buffers_fini(mlxsw_sp);
mlxsw_sp_traps_fini(mlxsw_sp);
- WARN_ON(!list_empty(&mlxsw_sp->vfids.list));
- WARN_ON(!list_empty(&mlxsw_sp->fids));
+ mlxsw_sp_fids_fini(mlxsw_sp);
}
static struct mlxsw_config_profile mlxsw_sp_config_profile = {
@@ -3450,7 +3707,7 @@ static struct mlxsw_config_profile mlxsw_sp_config_profile = {
.max_fid_offset_flood_tables = 3,
.fid_offset_flood_table_size = VLAN_N_VID - 1,
.max_fid_flood_tables = 3,
- .fid_flood_table_size = MLXSW_SP_VFID_MAX,
+ .fid_flood_table_size = MLXSW_SP_FID_8021D_MAX,
.used_max_ib_mc = 1,
.max_ib_mc = 0,
.used_max_pkey = 1,
@@ -3510,7 +3767,7 @@ static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev, void *data)
return ret;
}
-static struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
+struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
{
struct mlxsw_sp_port *mlxsw_sp_port;
@@ -3531,7 +3788,7 @@ struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL;
}
-static struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
+struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
{
struct mlxsw_sp_port *mlxsw_sp_port;
@@ -3562,176 +3819,6 @@ void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port)
dev_put(mlxsw_sp_port->dev);
}
-static bool mlxsw_sp_lag_port_fid_member(struct mlxsw_sp_port *lag_port,
- u16 fid)
-{
- if (mlxsw_sp_fid_is_vfid(fid))
- return mlxsw_sp_port_vport_find_by_fid(lag_port, fid);
- else
- return test_bit(fid, lag_port->active_vlans);
-}
-
-static bool mlxsw_sp_port_fdb_should_flush(struct mlxsw_sp_port *mlxsw_sp_port,
- u16 fid)
-{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- u8 local_port = mlxsw_sp_port->local_port;
- u16 lag_id = mlxsw_sp_port->lag_id;
- u64 max_lag_members;
- int i, count = 0;
-
- if (!mlxsw_sp_port->lagged)
- return true;
-
- max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
- MAX_LAG_MEMBERS);
- for (i = 0; i < max_lag_members; i++) {
- struct mlxsw_sp_port *lag_port;
-
- lag_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
- if (!lag_port || lag_port->local_port == local_port)
- continue;
- if (mlxsw_sp_lag_port_fid_member(lag_port, fid))
- count++;
- }
-
- return !count;
-}
-
-static int
-mlxsw_sp_port_fdb_flush_by_port_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
- u16 fid)
-{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- char sfdf_pl[MLXSW_REG_SFDF_LEN];
-
- mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID);
- mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
- mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl,
- mlxsw_sp_port->local_port);
-
- netdev_dbg(mlxsw_sp_port->dev, "FDB flushed using Port=%d, FID=%d\n",
- mlxsw_sp_port->local_port, fid);
-
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
-}
-
-static int
-mlxsw_sp_port_fdb_flush_by_lag_id_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
- u16 fid)
-{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- char sfdf_pl[MLXSW_REG_SFDF_LEN];
-
- mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID);
- mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
- mlxsw_reg_sfdf_lag_fid_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id);
-
- netdev_dbg(mlxsw_sp_port->dev, "FDB flushed using LAG ID=%d, FID=%d\n",
- mlxsw_sp_port->lag_id, fid);
-
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
-}
-
-int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid)
-{
- if (!mlxsw_sp_port_fdb_should_flush(mlxsw_sp_port, fid))
- return 0;
-
- if (mlxsw_sp_port->lagged)
- return mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_port,
- fid);
- else
- return mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_port, fid);
-}
-
-static void mlxsw_sp_master_bridge_gone_sync(struct mlxsw_sp *mlxsw_sp)
-{
- struct mlxsw_sp_fid *f, *tmp;
-
- list_for_each_entry_safe(f, tmp, &mlxsw_sp->fids, list)
- if (--f->ref_count == 0)
- mlxsw_sp_fid_destroy(mlxsw_sp, f);
- else
- WARN_ON_ONCE(1);
-}
-
-static bool mlxsw_sp_master_bridge_check(struct mlxsw_sp *mlxsw_sp,
- struct net_device *br_dev)
-{
- return !mlxsw_sp->master_bridge.dev ||
- mlxsw_sp->master_bridge.dev == br_dev;
-}
-
-static void mlxsw_sp_master_bridge_inc(struct mlxsw_sp *mlxsw_sp,
- struct net_device *br_dev)
-{
- mlxsw_sp->master_bridge.dev = br_dev;
- mlxsw_sp->master_bridge.ref_count++;
-}
-
-static void mlxsw_sp_master_bridge_dec(struct mlxsw_sp *mlxsw_sp)
-{
- if (--mlxsw_sp->master_bridge.ref_count == 0) {
- mlxsw_sp->master_bridge.dev = NULL;
- /* It's possible upper VLAN devices are still holding
- * references to underlying FIDs. Drop the reference
- * and release the resources if it was the last one.
- * If it wasn't, then something bad happened.
- */
- mlxsw_sp_master_bridge_gone_sync(mlxsw_sp);
- }
-}
-
-static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
- struct net_device *br_dev)
-{
- struct net_device *dev = mlxsw_sp_port->dev;
- int err;
-
-	/* When the port is not bridged, untagged packets are tagged with
-	 * PVID=VID=1, thereby creating an implicit VLAN interface in
-	 * the device. Remove it and let the bridge code take care of its
-	 * own VLANs.
- */
- err = mlxsw_sp_port_kill_vid(dev, 0, 1);
- if (err)
- return err;
-
- mlxsw_sp_master_bridge_inc(mlxsw_sp_port->mlxsw_sp, br_dev);
-
- mlxsw_sp_port->learning = 1;
- mlxsw_sp_port->learning_sync = 1;
- mlxsw_sp_port->uc_flood = 1;
- mlxsw_sp_port->mc_flood = 1;
- mlxsw_sp_port->mc_router = 0;
- mlxsw_sp_port->mc_disabled = 1;
- mlxsw_sp_port->bridged = 1;
-
- return 0;
-}
-
-static void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port)
-{
- struct net_device *dev = mlxsw_sp_port->dev;
-
- mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);
-
- mlxsw_sp_master_bridge_dec(mlxsw_sp_port->mlxsw_sp);
-
- mlxsw_sp_port->learning = 0;
- mlxsw_sp_port->learning_sync = 0;
- mlxsw_sp_port->uc_flood = 0;
- mlxsw_sp_port->mc_flood = 0;
- mlxsw_sp_port->mc_router = 0;
- mlxsw_sp_port->bridged = 0;
-
-	/* Add an implicit VLAN interface to the device, so that untagged
-	 * packets are classified to the default vFID.
- */
- mlxsw_sp_port_add_vid(dev, 0, 1);
-}
-
static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
char sldr_pl[MLXSW_REG_SLDR_LEN];
@@ -3850,51 +3937,11 @@ static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
return -EBUSY;
}
-static void
-mlxsw_sp_port_pvid_vport_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
- struct net_device *lag_dev, u16 lag_id)
-{
- struct mlxsw_sp_port *mlxsw_sp_vport;
- struct mlxsw_sp_fid *f;
-
- mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, 1);
- if (WARN_ON(!mlxsw_sp_vport))
- return;
-
- /* If vPort is assigned a RIF, then leave it since it's no
- * longer valid.
- */
- f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
- if (f)
- f->leave(mlxsw_sp_vport);
-
- mlxsw_sp_vport->lag_id = lag_id;
- mlxsw_sp_vport->lagged = 1;
- mlxsw_sp_vport->dev = lag_dev;
-}
-
-static void
-mlxsw_sp_port_pvid_vport_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port)
-{
- struct mlxsw_sp_port *mlxsw_sp_vport;
- struct mlxsw_sp_fid *f;
-
- mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, 1);
- if (WARN_ON(!mlxsw_sp_vport))
- return;
-
- f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
- if (f)
- f->leave(mlxsw_sp_vport);
-
- mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
- mlxsw_sp_vport->lagged = 0;
-}
-
static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
struct net_device *lag_dev)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
struct mlxsw_sp_upper *lag;
u16 lag_id;
u8 port_index;
@@ -3927,7 +3974,10 @@ static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
mlxsw_sp_port->lagged = 1;
lag->ref_count++;
- mlxsw_sp_port_pvid_vport_lag_join(mlxsw_sp_port, lag_dev, lag_id);
+ /* Port is no longer usable as a router interface */
+ mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, 1);
+ if (mlxsw_sp_port_vlan->fid)
+ mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
return 0;
@@ -3954,10 +4004,8 @@ static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id);
mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
- if (mlxsw_sp_port->bridged) {
- mlxsw_sp_port_active_vlans_del(mlxsw_sp_port);
- mlxsw_sp_port_bridge_leave(mlxsw_sp_port);
- }
+ /* Any VLANs configured on the port are no longer valid */
+ mlxsw_sp_port_vlan_flush(mlxsw_sp_port);
if (lag->ref_count == 1)
mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
@@ -3967,7 +4015,9 @@ static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
mlxsw_sp_port->lagged = 0;
lag->ref_count--;
- mlxsw_sp_port_pvid_vport_lag_leave(mlxsw_sp_port);
+ mlxsw_sp_port_vlan_get(mlxsw_sp_port, 1);
+ /* Make sure untagged frames are allowed to ingress */
+ mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);
}
static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
@@ -4009,34 +4059,6 @@ static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
return mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, info->tx_enabled);
}
-static int mlxsw_sp_port_vlan_link(struct mlxsw_sp_port *mlxsw_sp_port,
- struct net_device *vlan_dev)
-{
- struct mlxsw_sp_port *mlxsw_sp_vport;
- u16 vid = vlan_dev_vlan_id(vlan_dev);
-
- mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
- if (WARN_ON(!mlxsw_sp_vport))
- return -EINVAL;
-
- mlxsw_sp_vport->dev = vlan_dev;
-
- return 0;
-}
-
-static void mlxsw_sp_port_vlan_unlink(struct mlxsw_sp_port *mlxsw_sp_port,
- struct net_device *vlan_dev)
-{
- struct mlxsw_sp_port *mlxsw_sp_vport;
- u16 vid = vlan_dev_vlan_id(vlan_dev);
-
- mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
- if (WARN_ON(!mlxsw_sp_vport))
- return;
-
- mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
-}
-
static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
bool enable)
{
@@ -4066,9 +4088,12 @@ static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port)
{
int err;
- err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true);
+ err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
if (err)
return err;
+ err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true);
+ if (err)
+ goto err_port_stp_set;
err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 2, VLAN_N_VID - 1,
true, false);
if (err)
@@ -4077,6 +4102,8 @@ static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port)
err_port_vlan_set:
mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
+err_port_stp_set:
+ mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
return err;
}
@@ -4085,9 +4112,11 @@ static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port)
mlxsw_sp_port_vlan_set(mlxsw_sp_port, 2, VLAN_N_VID - 1,
false, false);
mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
+ mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
}
-static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev,
+static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
+ struct net_device *dev,
unsigned long event, void *ptr)
{
struct netdev_notifier_changeupper_info *info;
@@ -4110,10 +4139,6 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev,
return -EINVAL;
if (!info->linking)
break;
-		/* A HW limitation forbids putting ports in multiple bridges. */
- if (netif_is_bridge_master(upper_dev) &&
- !mlxsw_sp_master_bridge_check(mlxsw_sp, upper_dev))
- return -EINVAL;
if (netif_is_lag_master(upper_dev) &&
!mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
info->upper_info))
@@ -4130,19 +4155,15 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev,
break;
case NETDEV_CHANGEUPPER:
upper_dev = info->upper_dev;
- if (is_vlan_dev(upper_dev)) {
- if (info->linking)
- err = mlxsw_sp_port_vlan_link(mlxsw_sp_port,
- upper_dev);
- else
- mlxsw_sp_port_vlan_unlink(mlxsw_sp_port,
- upper_dev);
- } else if (netif_is_bridge_master(upper_dev)) {
+ if (netif_is_bridge_master(upper_dev)) {
if (info->linking)
err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
+ lower_dev,
upper_dev);
else
- mlxsw_sp_port_bridge_leave(mlxsw_sp_port);
+ mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
+ lower_dev,
+ upper_dev);
} else if (netif_is_lag_master(upper_dev)) {
if (info->linking)
err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
@@ -4155,9 +4176,6 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev,
err = mlxsw_sp_port_ovs_join(mlxsw_sp_port);
else
mlxsw_sp_port_ovs_leave(mlxsw_sp_port);
- } else {
- err = -EINVAL;
- WARN_ON(1);
}
break;
}
@@ -4189,15 +4207,18 @@ static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
return 0;
}
-static int mlxsw_sp_netdevice_port_event(struct net_device *dev,
+static int mlxsw_sp_netdevice_port_event(struct net_device *lower_dev,
+ struct net_device *port_dev,
unsigned long event, void *ptr)
{
switch (event) {
case NETDEV_PRECHANGEUPPER:
case NETDEV_CHANGEUPPER:
- return mlxsw_sp_netdevice_port_upper_event(dev, event, ptr);
+ return mlxsw_sp_netdevice_port_upper_event(lower_dev, port_dev,
+ event, ptr);
case NETDEV_CHANGELOWERSTATE:
- return mlxsw_sp_netdevice_port_lower_event(dev, event, ptr);
+ return mlxsw_sp_netdevice_port_lower_event(port_dev, event,
+ ptr);
}
return 0;
@@ -4212,7 +4233,8 @@ static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
netdev_for_each_lower_dev(lag_dev, dev, iter) {
if (mlxsw_sp_port_dev_check(dev)) {
- ret = mlxsw_sp_netdevice_port_event(dev, event, ptr);
+ ret = mlxsw_sp_netdevice_port_event(lag_dev, dev, event,
+ ptr);
if (ret)
return ret;
}
@@ -4221,322 +4243,33 @@ static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
return 0;
}
-static int mlxsw_sp_master_bridge_vlan_link(struct mlxsw_sp *mlxsw_sp,
- struct net_device *vlan_dev)
-{
- u16 fid = vlan_dev_vlan_id(vlan_dev);
- struct mlxsw_sp_fid *f;
-
- f = mlxsw_sp_fid_find(mlxsw_sp, fid);
- if (!f) {
- f = mlxsw_sp_fid_create(mlxsw_sp, fid);
- if (IS_ERR(f))
- return PTR_ERR(f);
- }
-
- f->ref_count++;
-
- return 0;
-}
-
-static void mlxsw_sp_master_bridge_vlan_unlink(struct mlxsw_sp *mlxsw_sp,
- struct net_device *vlan_dev)
-{
- u16 fid = vlan_dev_vlan_id(vlan_dev);
- struct mlxsw_sp_fid *f;
-
- f = mlxsw_sp_fid_find(mlxsw_sp, fid);
- if (f && f->rif)
- mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->rif);
- if (f && --f->ref_count == 0)
- mlxsw_sp_fid_destroy(mlxsw_sp, f);
-}
-
-static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
- unsigned long event, void *ptr)
-{
- struct netdev_notifier_changeupper_info *info;
- struct net_device *upper_dev;
- struct mlxsw_sp *mlxsw_sp;
- int err = 0;
-
- mlxsw_sp = mlxsw_sp_lower_get(br_dev);
- if (!mlxsw_sp)
- return 0;
-
- info = ptr;
-
- switch (event) {
- case NETDEV_PRECHANGEUPPER:
- upper_dev = info->upper_dev;
- if (!is_vlan_dev(upper_dev))
- return -EINVAL;
- if (is_vlan_dev(upper_dev) &&
- br_dev != mlxsw_sp->master_bridge.dev)
- return -EINVAL;
- break;
- case NETDEV_CHANGEUPPER:
- upper_dev = info->upper_dev;
- if (is_vlan_dev(upper_dev)) {
- if (info->linking)
- err = mlxsw_sp_master_bridge_vlan_link(mlxsw_sp,
- upper_dev);
- else
- mlxsw_sp_master_bridge_vlan_unlink(mlxsw_sp,
- upper_dev);
- } else {
- err = -EINVAL;
- WARN_ON(1);
- }
- break;
- }
-
- return err;
-}
-
-static u16 mlxsw_sp_avail_vfid_get(const struct mlxsw_sp *mlxsw_sp)
-{
- return find_first_zero_bit(mlxsw_sp->vfids.mapped,
- MLXSW_SP_VFID_MAX);
-}
-
-static int mlxsw_sp_vfid_op(struct mlxsw_sp *mlxsw_sp, u16 fid, bool create)
-{
- char sfmr_pl[MLXSW_REG_SFMR_LEN];
-
- mlxsw_reg_sfmr_pack(sfmr_pl, !create, fid, 0);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
-}
-
-static void mlxsw_sp_vport_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport);
-
-static struct mlxsw_sp_fid *mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp,
- struct net_device *br_dev)
-{
- struct device *dev = mlxsw_sp->bus_info->dev;
- struct mlxsw_sp_fid *f;
- u16 vfid, fid;
- int err;
-
- vfid = mlxsw_sp_avail_vfid_get(mlxsw_sp);
- if (vfid == MLXSW_SP_VFID_MAX) {
- dev_err(dev, "No available vFIDs\n");
- return ERR_PTR(-ERANGE);
- }
-
- fid = mlxsw_sp_vfid_to_fid(vfid);
- err = mlxsw_sp_vfid_op(mlxsw_sp, fid, true);
- if (err) {
- dev_err(dev, "Failed to create FID=%d\n", fid);
- return ERR_PTR(err);
- }
-
- f = kzalloc(sizeof(*f), GFP_KERNEL);
- if (!f)
- goto err_allocate_vfid;
-
- f->leave = mlxsw_sp_vport_vfid_leave;
- f->fid = fid;
- f->dev = br_dev;
-
- list_add(&f->list, &mlxsw_sp->vfids.list);
- set_bit(vfid, mlxsw_sp->vfids.mapped);
-
- return f;
-
-err_allocate_vfid:
- mlxsw_sp_vfid_op(mlxsw_sp, fid, false);
- return ERR_PTR(-ENOMEM);
-}
-
-static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fid *f)
-{
- u16 vfid = mlxsw_sp_fid_to_vfid(f->fid);
- u16 fid = f->fid;
-
- clear_bit(vfid, mlxsw_sp->vfids.mapped);
- list_del(&f->list);
-
- if (f->rif)
- mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->rif);
-
- kfree(f);
-
- mlxsw_sp_vfid_op(mlxsw_sp, fid, false);
-}
-
-static int mlxsw_sp_vport_fid_map(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
- bool valid)
-{
- enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
- u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
-
- return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, mt, valid, fid,
- vid);
-}
-
-static int mlxsw_sp_vport_vfid_join(struct mlxsw_sp_port *mlxsw_sp_vport,
- struct net_device *br_dev)
-{
- struct mlxsw_sp_fid *f;
- int err;
-
- f = mlxsw_sp_vfid_find(mlxsw_sp_vport->mlxsw_sp, br_dev);
- if (!f) {
- f = mlxsw_sp_vfid_create(mlxsw_sp_vport->mlxsw_sp, br_dev);
- if (IS_ERR(f))
- return PTR_ERR(f);
- }
-
- err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, true);
- if (err)
- goto err_vport_flood_set;
-
- err = mlxsw_sp_vport_fid_map(mlxsw_sp_vport, f->fid, true);
- if (err)
- goto err_vport_fid_map;
-
- mlxsw_sp_vport_fid_set(mlxsw_sp_vport, f);
- f->ref_count++;
-
- netdev_dbg(mlxsw_sp_vport->dev, "Joined FID=%d\n", f->fid);
-
- return 0;
-
-err_vport_fid_map:
- mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false);
-err_vport_flood_set:
- if (!f->ref_count)
- mlxsw_sp_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f);
- return err;
-}
-
-static void mlxsw_sp_vport_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
-{
- struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
-
- netdev_dbg(mlxsw_sp_vport->dev, "Left FID=%d\n", f->fid);
-
- mlxsw_sp_vport_fid_map(mlxsw_sp_vport, f->fid, false);
-
- mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false);
-
- mlxsw_sp_port_fdb_flush(mlxsw_sp_vport, f->fid);
-
- mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL);
- if (--f->ref_count == 0)
- mlxsw_sp_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f);
-}
-
-static int mlxsw_sp_vport_bridge_join(struct mlxsw_sp_port *mlxsw_sp_vport,
- struct net_device *br_dev)
-{
- struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
- u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
- struct net_device *dev = mlxsw_sp_vport->dev;
- int err;
-
- if (f && !WARN_ON(!f->leave))
- f->leave(mlxsw_sp_vport);
-
- err = mlxsw_sp_vport_vfid_join(mlxsw_sp_vport, br_dev);
- if (err) {
- netdev_err(dev, "Failed to join vFID\n");
- return err;
- }
-
- err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
- if (err) {
- netdev_err(dev, "Failed to enable learning\n");
- goto err_port_vid_learning_set;
- }
-
- mlxsw_sp_vport->learning = 1;
- mlxsw_sp_vport->learning_sync = 1;
- mlxsw_sp_vport->uc_flood = 1;
- mlxsw_sp_vport->mc_flood = 1;
- mlxsw_sp_vport->mc_router = 0;
- mlxsw_sp_vport->mc_disabled = 1;
- mlxsw_sp_vport->bridged = 1;
-
- return 0;
-
-err_port_vid_learning_set:
- mlxsw_sp_vport_vfid_leave(mlxsw_sp_vport);
- return err;
-}
-
-static void mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
-{
- u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
-
- mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
-
- mlxsw_sp_vport_vfid_leave(mlxsw_sp_vport);
-
- mlxsw_sp_vport->learning = 0;
- mlxsw_sp_vport->learning_sync = 0;
- mlxsw_sp_vport->uc_flood = 0;
- mlxsw_sp_vport->mc_flood = 0;
- mlxsw_sp_vport->mc_router = 0;
- mlxsw_sp_vport->bridged = 0;
-}
-
-static bool
-mlxsw_sp_port_master_bridge_check(const struct mlxsw_sp_port *mlxsw_sp_port,
- const struct net_device *br_dev)
-{
- struct mlxsw_sp_port *mlxsw_sp_vport;
-
- list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
- vport.list) {
- struct net_device *dev = mlxsw_sp_vport_dev_get(mlxsw_sp_vport);
-
- if (dev && dev == br_dev)
- return false;
- }
-
- return true;
-}
-
-static int mlxsw_sp_netdevice_vport_event(struct net_device *dev,
- unsigned long event, void *ptr,
- u16 vid)
+static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
+ struct net_device *dev,
+ unsigned long event, void *ptr,
+ u16 vid)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
struct netdev_notifier_changeupper_info *info = ptr;
- struct mlxsw_sp_port *mlxsw_sp_vport;
struct net_device *upper_dev;
int err = 0;
- mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
- if (!mlxsw_sp_vport)
- return 0;
-
switch (event) {
case NETDEV_PRECHANGEUPPER:
upper_dev = info->upper_dev;
if (!netif_is_bridge_master(upper_dev))
return -EINVAL;
- if (!info->linking)
- break;
-		/* We can't have multiple VLAN interfaces on the same port
-		 * that are members of the same bridge.
- */
- if (netif_is_bridge_master(upper_dev) &&
- !mlxsw_sp_port_master_bridge_check(mlxsw_sp_port,
- upper_dev))
- return -EINVAL;
break;
case NETDEV_CHANGEUPPER:
upper_dev = info->upper_dev;
if (netif_is_bridge_master(upper_dev)) {
if (info->linking)
- err = mlxsw_sp_vport_bridge_join(mlxsw_sp_vport,
- upper_dev);
+ err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
+ vlan_dev,
+ upper_dev);
else
- mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport);
+ mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
+ vlan_dev,
+ upper_dev);
} else {
err = -EINVAL;
WARN_ON(1);
@@ -4547,9 +4280,10 @@ static int mlxsw_sp_netdevice_vport_event(struct net_device *dev,
return err;
}
-static int mlxsw_sp_netdevice_lag_vport_event(struct net_device *lag_dev,
- unsigned long event, void *ptr,
- u16 vid)
+static int mlxsw_sp_netdevice_lag_port_vlan_event(struct net_device *vlan_dev,
+ struct net_device *lag_dev,
+ unsigned long event,
+ void *ptr, u16 vid)
{
struct net_device *dev;
struct list_head *iter;
@@ -4557,8 +4291,9 @@ static int mlxsw_sp_netdevice_lag_vport_event(struct net_device *lag_dev,
netdev_for_each_lower_dev(lag_dev, dev, iter) {
if (mlxsw_sp_port_dev_check(dev)) {
- ret = mlxsw_sp_netdevice_vport_event(dev, event, ptr,
- vid);
+ ret = mlxsw_sp_netdevice_port_vlan_event(vlan_dev, dev,
+ event, ptr,
+ vid);
if (ret)
return ret;
}
@@ -4574,11 +4309,12 @@ static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
u16 vid = vlan_dev_vlan_id(vlan_dev);
if (mlxsw_sp_port_dev_check(real_dev))
- return mlxsw_sp_netdevice_vport_event(real_dev, event, ptr,
- vid);
+ return mlxsw_sp_netdevice_port_vlan_event(vlan_dev, real_dev,
+ event, ptr, vid);
else if (netif_is_lag_master(real_dev))
- return mlxsw_sp_netdevice_lag_vport_event(real_dev, event, ptr,
- vid);
+ return mlxsw_sp_netdevice_lag_port_vlan_event(vlan_dev,
+ real_dev, event,
+ ptr, vid);
return 0;
}
@@ -4603,11 +4339,9 @@ static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
else if (mlxsw_sp_is_vrf_event(event, ptr))
err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr);
else if (mlxsw_sp_port_dev_check(dev))
- err = mlxsw_sp_netdevice_port_event(dev, event, ptr);
+ err = mlxsw_sp_netdevice_port_event(dev, dev, event, ptr);
else if (netif_is_lag_master(dev))
err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
- else if (netif_is_bridge_master(dev))
- err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr);
else if (is_vlan_dev(dev))
err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);
@@ -4680,3 +4414,4 @@ MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
MODULE_DEVICE_TABLE(pci, mlxsw_sp_pci_id_table);
+MODULE_FIRMWARE(MLXSW_SP_FW_FILENAME);
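
MODULE_FIRMWARE() only declares the image dependency for userspace tooling; the actual upgrade happens in the new mlxsw_sp_fw_rev_validate() call during init. A hedged sketch of the flow it implies, with foo_flash() standing in for the real flashing helper (in practice built on the mlxfw layer):

static int example_fw_upgrade(struct mlxsw_sp *mlxsw_sp)
{
	const struct firmware *firmware;
	int err;

	err = request_firmware(&firmware, MLXSW_SP_FW_FILENAME,
			       mlxsw_sp->bus_info->dev);
	if (err)
		return err;

	/* foo_flash() is illustrative; after flashing, the device needs
	 * a reset before the new image takes effect.
	 */
	err = foo_flash(mlxsw_sp, firmware);
	release_firmware(firmware);
	return err;
}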
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 0c23bc1e946d..5ef98d4d0ab6 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -54,12 +54,7 @@
#include "core_acl_flex_keys.h"
#include "core_acl_flex_actions.h"
-#define MLXSW_SP_VFID_BASE VLAN_N_VID
-#define MLXSW_SP_VFID_MAX 1024 /* Bridged VLAN interfaces */
-
-#define MLXSW_SP_DUMMY_FID 15359
-
-#define MLXSW_SP_RFID_BASE 15360
+#define MLXSW_SP_FID_8021D_MAX 1024
#define MLXSW_SP_MID_MAX 7000
@@ -78,13 +73,19 @@ struct mlxsw_sp_upper {
unsigned int ref_count;
};
-struct mlxsw_sp_fid {
- void (*leave)(struct mlxsw_sp_port *mlxsw_sp_vport);
- struct list_head list;
- unsigned int ref_count;
- struct net_device *dev;
- struct mlxsw_sp_rif *rif;
- u16 fid;
+enum mlxsw_sp_rif_type {
+ MLXSW_SP_RIF_TYPE_SUBPORT,
+ MLXSW_SP_RIF_TYPE_VLAN,
+ MLXSW_SP_RIF_TYPE_FID,
+ MLXSW_SP_RIF_TYPE_MAX,
+};
+
+enum mlxsw_sp_fid_type {
+ MLXSW_SP_FID_TYPE_8021Q,
+ MLXSW_SP_FID_TYPE_8021D,
+ MLXSW_SP_FID_TYPE_RFID,
+ MLXSW_SP_FID_TYPE_DUMMY,
+ MLXSW_SP_FID_TYPE_MAX,
};
struct mlxsw_sp_mid {
@@ -95,85 +96,6 @@ struct mlxsw_sp_mid {
unsigned int ref_count;
};
-static inline u16 mlxsw_sp_vfid_to_fid(u16 vfid)
-{
- return MLXSW_SP_VFID_BASE + vfid;
-}
-
-static inline u16 mlxsw_sp_fid_to_vfid(u16 fid)
-{
- return fid - MLXSW_SP_VFID_BASE;
-}
-
-static inline bool mlxsw_sp_fid_is_vfid(u16 fid)
-{
- return fid >= MLXSW_SP_VFID_BASE && fid < MLXSW_SP_DUMMY_FID;
-}
-
-struct mlxsw_sp_sb_pr {
- enum mlxsw_reg_sbpr_mode mode;
- u32 size;
-};
-
-struct mlxsw_cp_sb_occ {
- u32 cur;
- u32 max;
-};
-
-struct mlxsw_sp_sb_cm {
- u32 min_buff;
- u32 max_buff;
- u8 pool;
- struct mlxsw_cp_sb_occ occ;
-};
-
-struct mlxsw_sp_sb_pm {
- u32 min_buff;
- u32 max_buff;
- struct mlxsw_cp_sb_occ occ;
-};
-
-#define MLXSW_SP_SB_POOL_COUNT 4
-#define MLXSW_SP_SB_TC_COUNT 8
-
-struct mlxsw_sp_sb_port {
- struct mlxsw_sp_sb_cm cms[2][MLXSW_SP_SB_TC_COUNT];
- struct mlxsw_sp_sb_pm pms[2][MLXSW_SP_SB_POOL_COUNT];
-};
-
-struct mlxsw_sp_sb {
- struct mlxsw_sp_sb_pr prs[2][MLXSW_SP_SB_POOL_COUNT];
- struct mlxsw_sp_sb_port *ports;
- u32 cell_size;
-};
-
-#define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE)
-
-struct mlxsw_sp_prefix_usage {
- DECLARE_BITMAP(b, MLXSW_SP_PREFIX_COUNT);
-};
-
-enum mlxsw_sp_l3proto {
- MLXSW_SP_L3_PROTO_IPV4,
- MLXSW_SP_L3_PROTO_IPV6,
-};
-
-struct mlxsw_sp_lpm_tree {
- u8 id; /* tree ID */
- unsigned int ref_count;
- enum mlxsw_sp_l3proto proto;
- struct mlxsw_sp_prefix_usage prefix_usage;
-};
-
-struct mlxsw_sp_fib;
-
-struct mlxsw_sp_vr {
- u16 id; /* virtual router ID */
- u32 tb_id; /* kernel fib table id */
- unsigned int rif_count;
- struct mlxsw_sp_fib *fib4;
-};
-
enum mlxsw_sp_span_type {
MLXSW_SP_SPAN_EGRESS,
MLXSW_SP_SPAN_INGRESS
@@ -212,58 +134,25 @@ struct mlxsw_sp_port_mall_tc_entry {
};
};
-struct mlxsw_sp_router {
- struct mlxsw_sp_vr *vrs;
- struct rhashtable neigh_ht;
- struct rhashtable nexthop_group_ht;
- struct rhashtable nexthop_ht;
- struct {
- struct mlxsw_sp_lpm_tree *trees;
- unsigned int tree_count;
- } lpm;
- struct {
- struct delayed_work dw;
- unsigned long interval; /* ms */
- } neighs_update;
- struct delayed_work nexthop_probe_dw;
-#define MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL 5000 /* ms */
- struct list_head nexthop_neighs_list;
- bool aborted;
-};
-
+struct mlxsw_sp_sb;
+struct mlxsw_sp_bridge;
+struct mlxsw_sp_router;
struct mlxsw_sp_acl;
struct mlxsw_sp_counter_pool;
+struct mlxsw_sp_fid_core;
struct mlxsw_sp {
- struct {
- struct list_head list;
- DECLARE_BITMAP(mapped, MLXSW_SP_VFID_MAX);
- } vfids;
- struct {
- struct list_head list;
- DECLARE_BITMAP(mapped, MLXSW_SP_MID_MAX);
- } br_mids;
- struct list_head fids; /* VLAN-aware bridge FIDs */
- struct mlxsw_sp_rif **rifs;
struct mlxsw_sp_port **ports;
struct mlxsw_core *core;
const struct mlxsw_bus_info *bus_info;
unsigned char base_mac[ETH_ALEN];
- struct {
- struct delayed_work dw;
-#define MLXSW_SP_DEFAULT_LEARNING_INTERVAL 100
- unsigned int interval; /* ms */
- } fdb_notify;
-#define MLXSW_SP_MIN_AGEING_TIME 10
-#define MLXSW_SP_MAX_AGEING_TIME 1000000
-#define MLXSW_SP_DEFAULT_AGEING_TIME 300
- u32 ageing_time;
- struct mlxsw_sp_upper master_bridge;
struct mlxsw_sp_upper *lags;
u8 *port_to_module;
- struct mlxsw_sp_sb sb;
- struct mlxsw_sp_router router;
+ struct mlxsw_sp_sb *sb;
+ struct mlxsw_sp_bridge *bridge;
+ struct mlxsw_sp_router *router;
struct mlxsw_sp_acl *acl;
+ struct mlxsw_sp_fid_core *fid_core;
struct {
DECLARE_BITMAP(usage, MLXSW_SP_KVD_LINEAR_SIZE);
} kvdl;
@@ -273,7 +162,6 @@ struct mlxsw_sp {
struct mlxsw_sp_span_entry *entries;
int entries_count;
} span;
- struct notifier_block fib_nb;
};
static inline struct mlxsw_sp_upper *
@@ -282,18 +170,6 @@ mlxsw_sp_lag_get(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
return &mlxsw_sp->lags[lag_id];
}
-static inline u32 mlxsw_sp_cells_bytes(const struct mlxsw_sp *mlxsw_sp,
- u32 cells)
-{
- return mlxsw_sp->sb.cell_size * cells;
-}
-
-static inline u32 mlxsw_sp_bytes_cells(const struct mlxsw_sp *mlxsw_sp,
- u32 bytes)
-{
- return DIV_ROUND_UP(bytes, mlxsw_sp->sb.cell_size);
-}
-
struct mlxsw_sp_port_pcpu_stats {
u64 rx_packets;
u64 rx_bytes;
@@ -310,29 +186,28 @@ struct mlxsw_sp_port_sample {
bool truncate;
};
+struct mlxsw_sp_bridge_port;
+struct mlxsw_sp_fid;
+
+struct mlxsw_sp_port_vlan {
+ struct list_head list;
+ struct mlxsw_sp_port *mlxsw_sp_port;
+ struct mlxsw_sp_fid *fid;
+ u16 vid;
+ struct mlxsw_sp_bridge_port *bridge_port;
+ struct list_head bridge_vlan_node;
+};
+
struct mlxsw_sp_port {
struct net_device *dev;
struct mlxsw_sp_port_pcpu_stats __percpu *pcpu_stats;
struct mlxsw_sp *mlxsw_sp;
u8 local_port;
- u8 stp_state;
- u16 learning:1,
- learning_sync:1,
- uc_flood:1,
- mc_flood:1,
- mc_router:1,
- mc_disabled:1,
- bridged:1,
- lagged:1,
+ u8 lagged:1,
split:1;
u16 pvid;
u16 lag_id;
struct {
- struct list_head list;
- struct mlxsw_sp_fid *f;
- u16 vid;
- } vport;
- struct {
u8 tx_pause:1,
rx_pause:1,
autoneg:1;
@@ -347,11 +222,6 @@ struct mlxsw_sp_port {
u8 width;
u8 lane;
} mapping;
- /* 802.1Q bridge VLANs */
- unsigned long *active_vlans;
- unsigned long *untagged_vlans;
- /* VLAN interfaces */
- struct list_head vports_list;
/* TC handles */
struct list_head mall_tc_list;
struct {
@@ -360,13 +230,9 @@ struct mlxsw_sp_port {
struct delayed_work update_dw;
} hw_stats;
struct mlxsw_sp_port_sample *sample;
+ struct list_head vlans_list;
};
-bool mlxsw_sp_port_dev_check(const struct net_device *dev);
-struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev);
-struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev);
-void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port);
-
static inline bool
mlxsw_sp_port_is_pause_en(const struct mlxsw_sp_port *mlxsw_sp_port)
{
@@ -385,102 +251,28 @@ mlxsw_sp_port_lagged_get(struct mlxsw_sp *mlxsw_sp, u16 lag_id, u8 port_index)
return mlxsw_sp_port && mlxsw_sp_port->lagged ? mlxsw_sp_port : NULL;
}
-static inline u16
-mlxsw_sp_vport_vid_get(const struct mlxsw_sp_port *mlxsw_sp_vport)
-{
- return mlxsw_sp_vport->vport.vid;
-}
-
-static inline bool
-mlxsw_sp_port_is_vport(const struct mlxsw_sp_port *mlxsw_sp_port)
+static inline struct mlxsw_sp_port_vlan *
+mlxsw_sp_port_vlan_find_by_vid(const struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 vid)
{
- u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
- return vid != 0;
-}
-
-static inline void mlxsw_sp_vport_fid_set(struct mlxsw_sp_port *mlxsw_sp_vport,
- struct mlxsw_sp_fid *f)
-{
- mlxsw_sp_vport->vport.f = f;
-}
-
-static inline struct mlxsw_sp_fid *
-mlxsw_sp_vport_fid_get(const struct mlxsw_sp_port *mlxsw_sp_vport)
-{
- return mlxsw_sp_vport->vport.f;
-}
-
-static inline struct net_device *
-mlxsw_sp_vport_dev_get(const struct mlxsw_sp_port *mlxsw_sp_vport)
-{
- struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
-
- return f ? f->dev : NULL;
-}
-
-static inline struct mlxsw_sp_port *
-mlxsw_sp_port_vport_find(const struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
-{
- struct mlxsw_sp_port *mlxsw_sp_vport;
-
- list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
- vport.list) {
- if (mlxsw_sp_vport_vid_get(mlxsw_sp_vport) == vid)
- return mlxsw_sp_vport;
+ list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
+ list) {
+ if (mlxsw_sp_port_vlan->vid == vid)
+ return mlxsw_sp_port_vlan;
}
return NULL;
}
-static inline struct mlxsw_sp_port *
-mlxsw_sp_port_vport_find_by_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
- u16 fid)
-{
- struct mlxsw_sp_port *mlxsw_sp_vport;
-
- list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
- vport.list) {
- struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
-
- if (f && f->fid == fid)
- return mlxsw_sp_vport;
- }
-
- return NULL;
-}
-
-static inline struct mlxsw_sp_fid *mlxsw_sp_fid_find(struct mlxsw_sp *mlxsw_sp,
- u16 fid)
-{
- struct mlxsw_sp_fid *f;
-
- list_for_each_entry(f, &mlxsw_sp->fids, list)
- if (f->fid == fid)
- return f;
-
- return NULL;
-}
-
-static inline struct mlxsw_sp_fid *
-mlxsw_sp_vfid_find(const struct mlxsw_sp *mlxsw_sp,
- const struct net_device *br_dev)
-{
- struct mlxsw_sp_fid *f;
-
- list_for_each_entry(f, &mlxsw_sp->vfids.list, list)
- if (f->dev == br_dev)
- return f;
-
- return NULL;
-}
-
-enum mlxsw_sp_flood_table {
- MLXSW_SP_FLOOD_TABLE_UC,
- MLXSW_SP_FLOOD_TABLE_BC,
- MLXSW_SP_FLOOD_TABLE_MC,
+enum mlxsw_sp_flood_type {
+ MLXSW_SP_FLOOD_TYPE_UC,
+ MLXSW_SP_FLOOD_TYPE_BC,
+ MLXSW_SP_FLOOD_TYPE_MC,
};
+/* spectrum_buffers.c */
int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_buffers_fini(struct mlxsw_sp *mlxsw_sp);
int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port);
@@ -515,26 +307,26 @@ int mlxsw_sp_sb_occ_tc_port_bind_get(struct mlxsw_core_port *mlxsw_core_port,
unsigned int sb_index, u16 tc_index,
enum devlink_sb_pool_type pool_type,
u32 *p_cur, u32 *p_max);
+u32 mlxsw_sp_cells_bytes(const struct mlxsw_sp *mlxsw_sp, u32 cells);
+u32 mlxsw_sp_bytes_cells(const struct mlxsw_sp *mlxsw_sp, u32 bytes);
+/* spectrum_switchdev.c */
int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp);
-int mlxsw_sp_port_vlan_init(struct mlxsw_sp_port *mlxsw_sp_port);
void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port);
void mlxsw_sp_port_switchdev_fini(struct mlxsw_sp_port *mlxsw_sp_port);
-int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port,
- enum mlxsw_reg_svfa_mt mt, bool valid, u16 fid,
- u16 vid);
-int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
- u16 vid_end, bool is_member, bool untagged);
-int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
- bool set);
-void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port);
-int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid);
-int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid);
int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
bool adding);
-struct mlxsw_sp_fid *mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp, u16 fid);
-void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *f);
+void
+mlxsw_sp_port_vlan_bridge_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan);
+int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct net_device *brport_dev,
+ struct net_device *br_dev);
+void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct net_device *brport_dev,
+ struct net_device *br_dev);
+
+/* spectrum.c */
int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
bool dwrr, u8 dwrr_weight);
@@ -546,27 +338,45 @@ int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
enum mlxsw_reg_qeec_hr hr, u8 index,
u8 next_index, u32 maxrate);
-int __mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
- u16 vid_begin, u16 vid_end,
- bool learn_enable);
+int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
+ u8 state);
+int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable);
+int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
+ bool learn_enable);
+int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid);
+struct mlxsw_sp_port_vlan *
+mlxsw_sp_port_vlan_get(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid);
+void mlxsw_sp_port_vlan_put(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan);
+int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
+ u16 vid_end, bool is_member, bool untagged);
+int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
+ unsigned int counter_index, u64 *packets,
+ u64 *bytes);
+int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
+ unsigned int *p_counter_index);
+void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
+ unsigned int counter_index);
+bool mlxsw_sp_port_dev_check(const struct net_device *dev);
+struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev);
+struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev);
+struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev);
+void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port);
+struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev);
+/* spectrum_dcb.c */
#ifdef CONFIG_MLXSW_SPECTRUM_DCB
-
int mlxsw_sp_port_dcb_init(struct mlxsw_sp_port *mlxsw_sp_port);
void mlxsw_sp_port_dcb_fini(struct mlxsw_sp_port *mlxsw_sp_port);
-
#else
-
static inline int mlxsw_sp_port_dcb_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
return 0;
}
-
static inline void mlxsw_sp_port_dcb_fini(struct mlxsw_sp_port *mlxsw_sp_port)
{}
-
#endif
+/* spectrum_router.c */
int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp);
int mlxsw_sp_router_netevent_event(struct notifier_block *unused,
@@ -574,17 +384,17 @@ int mlxsw_sp_router_netevent_event(struct notifier_block *unused,
int mlxsw_sp_netdevice_router_port_event(struct net_device *dev);
int mlxsw_sp_inetaddr_event(struct notifier_block *unused,
unsigned long event, void *ptr);
-void mlxsw_sp_rif_bridge_destroy(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_rif *rif);
int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
struct netdev_notifier_changeupper_info *info);
+void
+mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan);
+void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif);
+/* spectrum_kvdl.c */
int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count,
u32 *p_entry_index);
void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp, int entry_index);
-struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl);
-
struct mlxsw_sp_acl_rule_info {
unsigned int priority;
struct mlxsw_afk_element_values values;
@@ -625,6 +435,8 @@ struct mlxsw_sp_acl_ops {
struct mlxsw_sp_acl_ruleset;
+/* spectrum_acl.c */
+struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl);
struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp,
struct net_device *dev, bool ingress,
@@ -649,6 +461,7 @@ void mlxsw_sp_acl_rulei_act_continue(struct mlxsw_sp_acl_rule_info *rulei);
void mlxsw_sp_acl_rulei_act_jump(struct mlxsw_sp_acl_rule_info *rulei,
u16 group_id);
int mlxsw_sp_acl_rulei_act_drop(struct mlxsw_sp_acl_rule_info *rulei);
+int mlxsw_sp_acl_rulei_act_trap(struct mlxsw_sp_acl_rule_info *rulei);
int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule_info *rulei,
struct net_device *out_dev);
@@ -683,23 +496,48 @@ int mlxsw_sp_acl_rule_get_stats(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule *rule,
u64 *packets, u64 *bytes, u64 *last_use);
+struct mlxsw_sp_fid *mlxsw_sp_acl_dummy_fid(struct mlxsw_sp *mlxsw_sp);
+
int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp);
+/* spectrum_acl_tcam.c */
extern const struct mlxsw_sp_acl_ops mlxsw_sp_acl_tcam_ops;
+/* spectrum_flower.c */
int mlxsw_sp_flower_replace(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
__be16 protocol, struct tc_cls_flower_offload *f);
void mlxsw_sp_flower_destroy(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
struct tc_cls_flower_offload *f);
int mlxsw_sp_flower_stats(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
struct tc_cls_flower_offload *f);
-int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
- unsigned int counter_index, u64 *packets,
- u64 *bytes);
-int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
- unsigned int *p_counter_index);
-void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
- unsigned int counter_index);
+
+/* spectrum_fid.c */
+int mlxsw_sp_fid_flood_set(struct mlxsw_sp_fid *fid,
+ enum mlxsw_sp_flood_type packet_type, u8 local_port,
+ bool member);
+int mlxsw_sp_fid_port_vid_map(struct mlxsw_sp_fid *fid,
+ struct mlxsw_sp_port *mlxsw_sp_port, u16 vid);
+void mlxsw_sp_fid_port_vid_unmap(struct mlxsw_sp_fid *fid,
+ struct mlxsw_sp_port *mlxsw_sp_port, u16 vid);
+enum mlxsw_sp_rif_type mlxsw_sp_fid_rif_type(const struct mlxsw_sp_fid *fid);
+u16 mlxsw_sp_fid_index(const struct mlxsw_sp_fid *fid);
+enum mlxsw_sp_fid_type mlxsw_sp_fid_type(const struct mlxsw_sp_fid *fid);
+void mlxsw_sp_fid_rif_set(struct mlxsw_sp_fid *fid, struct mlxsw_sp_rif *rif);
+enum mlxsw_sp_rif_type
+mlxsw_sp_fid_type_rif_type(const struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_fid_type type);
+u16 mlxsw_sp_fid_8021q_vid(const struct mlxsw_sp_fid *fid);
+struct mlxsw_sp_fid *mlxsw_sp_fid_8021q_get(struct mlxsw_sp *mlxsw_sp, u16 vid);
+struct mlxsw_sp_fid *mlxsw_sp_fid_8021d_get(struct mlxsw_sp *mlxsw_sp,
+ int br_ifindex);
+struct mlxsw_sp_fid *mlxsw_sp_fid_rfid_get(struct mlxsw_sp *mlxsw_sp,
+ u16 rif_index);
+struct mlxsw_sp_fid *mlxsw_sp_fid_dummy_get(struct mlxsw_sp *mlxsw_sp);
+void mlxsw_sp_fid_put(struct mlxsw_sp_fid *fid);
+int mlxsw_sp_port_fids_init(struct mlxsw_sp_port *mlxsw_sp_port);
+void mlxsw_sp_port_fids_fini(struct mlxsw_sp_port *mlxsw_sp_port);
+int mlxsw_sp_fids_init(struct mlxsw_sp *mlxsw_sp);
+void mlxsw_sp_fids_fini(struct mlxsw_sp *mlxsw_sp);
#endif
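
The replacement FID interface is reference counted: each *_get() either looks up an existing FID or creates one, and mlxsw_sp_fid_put() drops the reference, destroying the FID on the last put. A usage sketch built only from the declarations above (not actual driver code, error paths abbreviated):

static int example_join_8021q_fid(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_fid *fid;
	int err;

	fid = mlxsw_sp_fid_8021q_get(mlxsw_sp, vid);	/* lookup-or-create */
	if (IS_ERR(fid))
		return PTR_ERR(fid);

	err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid);
	if (err)
		goto err_port_vid_map;

	/* Make this port a member of the FID's unknown-unicast flood table. */
	err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC,
				     mlxsw_sp_port->local_port, true);
	if (err)
		goto err_fid_flood_set;

	return 0;

err_fid_flood_set:
	mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
err_port_vid_map:
	mlxsw_sp_fid_put(fid);
	return err;
}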
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
index 317f7b14627f..01a1501b56ca 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
@@ -53,6 +53,7 @@ struct mlxsw_sp_acl {
struct mlxsw_sp *mlxsw_sp;
struct mlxsw_afk *afk;
struct mlxsw_afa *afa;
+ struct mlxsw_sp_fid *dummy_fid;
const struct mlxsw_sp_acl_ops *ops;
struct rhashtable ruleset_ht;
struct list_head rules;
@@ -112,6 +113,11 @@ static const struct rhashtable_params mlxsw_sp_acl_rule_ht_params = {
.automatic_shrinking = true,
};
+struct mlxsw_sp_fid *mlxsw_sp_acl_dummy_fid(struct mlxsw_sp *mlxsw_sp)
+{
+ return mlxsw_sp->acl->dummy_fid;
+}
+
static struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_create(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_acl_profile_ops *ops)
@@ -341,6 +347,11 @@ int mlxsw_sp_acl_rulei_act_drop(struct mlxsw_sp_acl_rule_info *rulei)
return mlxsw_afa_block_append_drop(rulei->act_block);
}
+int mlxsw_sp_acl_rulei_act_trap(struct mlxsw_sp_acl_rule_info *rulei)
+{
+ return mlxsw_afa_block_append_trap(rulei->act_block);
+}
+
int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule_info *rulei,
struct net_device *out_dev)
@@ -676,6 +687,7 @@ static const struct mlxsw_afa_ops mlxsw_sp_act_afa_ops = {
int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp)
{
const struct mlxsw_sp_acl_ops *acl_ops = &mlxsw_sp_acl_tcam_ops;
+ struct mlxsw_sp_fid *fid;
struct mlxsw_sp_acl *acl;
int err;
@@ -706,6 +718,13 @@ int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp)
if (err)
goto err_rhashtable_init;
+ fid = mlxsw_sp_fid_dummy_get(mlxsw_sp);
+ if (IS_ERR(fid)) {
+ err = PTR_ERR(fid);
+ goto err_fid_get;
+ }
+ acl->dummy_fid = fid;
+
INIT_LIST_HEAD(&acl->rules);
err = acl_ops->init(mlxsw_sp, acl->priv);
if (err)
@@ -721,6 +740,8 @@ int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp)
return 0;
err_acl_ops_init:
+ mlxsw_sp_fid_put(fid);
+err_fid_get:
rhashtable_destroy(&acl->ruleset_ht);
err_rhashtable_init:
mlxsw_afa_destroy(acl->afa);
@@ -739,6 +760,7 @@ void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp)
cancel_delayed_work_sync(&mlxsw_sp->acl->rule_activity_update.dw);
acl_ops->fini(mlxsw_sp, acl->priv);
WARN_ON(!list_empty(&acl->rules));
+ mlxsw_sp_fid_put(acl->dummy_fid);
rhashtable_destroy(&acl->ruleset_ht);
mlxsw_afa_destroy(acl->afa);
mlxsw_afk_destroy(acl->afk);
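
The new mlxsw_sp_acl_rulei_act_trap() slots in next to the existing drop action when a rule's actions are assembled, in practice from the flower offload path. A hedged sketch of what a call site could look like:

/* Illustrative only: rulei comes from the rule-building path in the
 * flower offload code; the helper name is hypothetical.
 */
static int example_add_terminal_action(struct mlxsw_sp_acl_rule_info *rulei,
				       bool trap)
{
	if (trap)
		return mlxsw_sp_acl_rulei_act_trap(rulei);
	return mlxsw_sp_acl_rulei_act_drop(rulei);
}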
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h
index af7b7bad48df..85d5001a5818 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h
@@ -68,6 +68,11 @@ static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_dip[] = {
MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16),
};
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4[] = {
+ MLXSW_AFK_ELEMENT_INST_U32(SRC_IP4, 0x00, 0, 32),
+ MLXSW_AFK_ELEMENT_INST_U32(TCP_FLAGS, 0x08, 8, 9), /* TCP_CONTROL+TCP_ECN */
+};
+
static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_ex[] = {
MLXSW_AFK_ELEMENT_INST_U32(VID, 0x00, 0, 12),
MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x08, 29, 3),
@@ -102,6 +107,7 @@ static const struct mlxsw_afk_block mlxsw_sp_afk_blocks[] = {
MLXSW_AFK_BLOCK(0x12, mlxsw_sp_afk_element_info_l2_smac_ex),
MLXSW_AFK_BLOCK(0x30, mlxsw_sp_afk_element_info_ipv4_sip),
MLXSW_AFK_BLOCK(0x31, mlxsw_sp_afk_element_info_ipv4_dip),
+ MLXSW_AFK_BLOCK(0x32, mlxsw_sp_afk_element_info_ipv4),
MLXSW_AFK_BLOCK(0x33, mlxsw_sp_afk_element_info_ipv4_ex),
MLXSW_AFK_BLOCK(0x60, mlxsw_sp_afk_element_info_ipv6_dip),
MLXSW_AFK_BLOCK(0x65, mlxsw_sp_afk_element_info_ipv6_ex1),
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
index 3a24289979d9..61a10f166f97 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
@@ -983,6 +983,7 @@ static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv4[] = {
MLXSW_AFK_ELEMENT_SRC_L4_PORT,
MLXSW_AFK_ELEMENT_VID,
MLXSW_AFK_ELEMENT_PCP,
+ MLXSW_AFK_ELEMENT_TCP_FLAGS,
};
static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv6[] = {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
index 997189cfe7fd..93728c694e6d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
@@ -43,25 +43,72 @@
#include "port.h"
#include "reg.h"
+struct mlxsw_sp_sb_pr {
+ enum mlxsw_reg_sbpr_mode mode;
+ u32 size;
+};
+
+struct mlxsw_cp_sb_occ {
+ u32 cur;
+ u32 max;
+};
+
+struct mlxsw_sp_sb_cm {
+ u32 min_buff;
+ u32 max_buff;
+ u8 pool;
+ struct mlxsw_cp_sb_occ occ;
+};
+
+struct mlxsw_sp_sb_pm {
+ u32 min_buff;
+ u32 max_buff;
+ struct mlxsw_cp_sb_occ occ;
+};
+
+#define MLXSW_SP_SB_POOL_COUNT 4
+#define MLXSW_SP_SB_TC_COUNT 8
+
+struct mlxsw_sp_sb_port {
+ struct mlxsw_sp_sb_cm cms[2][MLXSW_SP_SB_TC_COUNT];
+ struct mlxsw_sp_sb_pm pms[2][MLXSW_SP_SB_POOL_COUNT];
+};
+
+struct mlxsw_sp_sb {
+ struct mlxsw_sp_sb_pr prs[2][MLXSW_SP_SB_POOL_COUNT];
+ struct mlxsw_sp_sb_port *ports;
+ u32 cell_size;
+};
+
+u32 mlxsw_sp_cells_bytes(const struct mlxsw_sp *mlxsw_sp, u32 cells)
+{
+ return mlxsw_sp->sb->cell_size * cells;
+}
+
+u32 mlxsw_sp_bytes_cells(const struct mlxsw_sp *mlxsw_sp, u32 bytes)
+{
+ return DIV_ROUND_UP(bytes, mlxsw_sp->sb->cell_size);
+}
+
static struct mlxsw_sp_sb_pr *mlxsw_sp_sb_pr_get(struct mlxsw_sp *mlxsw_sp,
u8 pool,
enum mlxsw_reg_sbxx_dir dir)
{
- return &mlxsw_sp->sb.prs[dir][pool];
+ return &mlxsw_sp->sb->prs[dir][pool];
}
static struct mlxsw_sp_sb_cm *mlxsw_sp_sb_cm_get(struct mlxsw_sp *mlxsw_sp,
u8 local_port, u8 pg_buff,
enum mlxsw_reg_sbxx_dir dir)
{
- return &mlxsw_sp->sb.ports[local_port].cms[dir][pg_buff];
+ return &mlxsw_sp->sb->ports[local_port].cms[dir][pg_buff];
}
static struct mlxsw_sp_sb_pm *mlxsw_sp_sb_pm_get(struct mlxsw_sp *mlxsw_sp,
u8 local_port, u8 pool,
enum mlxsw_reg_sbxx_dir dir)
{
- return &mlxsw_sp->sb.ports[local_port].pms[dir][pool];
+ return &mlxsw_sp->sb->ports[local_port].pms[dir][pool];
}
static int mlxsw_sp_sb_pr_write(struct mlxsw_sp *mlxsw_sp, u8 pool,
@@ -215,16 +262,17 @@ static int mlxsw_sp_sb_ports_init(struct mlxsw_sp *mlxsw_sp)
{
unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
- mlxsw_sp->sb.ports = kcalloc(max_ports, sizeof(struct mlxsw_sp_sb_port),
- GFP_KERNEL);
- if (!mlxsw_sp->sb.ports)
+ mlxsw_sp->sb->ports = kcalloc(max_ports,
+ sizeof(struct mlxsw_sp_sb_port),
+ GFP_KERNEL);
+ if (!mlxsw_sp->sb->ports)
return -ENOMEM;
return 0;
}
static void mlxsw_sp_sb_ports_fini(struct mlxsw_sp *mlxsw_sp)
{
- kfree(mlxsw_sp->sb.ports);
+ kfree(mlxsw_sp->sb->ports);
}
#define MLXSW_SP_SB_PR_INGRESS_SIZE 12440000
@@ -551,15 +599,19 @@ int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp)
if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, CELL_SIZE))
return -EIO;
- mlxsw_sp->sb.cell_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, CELL_SIZE);
if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_BUFFER_SIZE))
return -EIO;
sb_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_BUFFER_SIZE);
+ mlxsw_sp->sb = kzalloc(sizeof(*mlxsw_sp->sb), GFP_KERNEL);
+ if (!mlxsw_sp->sb)
+ return -ENOMEM;
+ mlxsw_sp->sb->cell_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, CELL_SIZE);
+
err = mlxsw_sp_sb_ports_init(mlxsw_sp);
if (err)
- return err;
+ goto err_sb_ports_init;
err = mlxsw_sp_sb_prs_init(mlxsw_sp);
if (err)
goto err_sb_prs_init;
@@ -584,6 +636,8 @@ err_sb_mms_init:
err_sb_cpu_port_sb_cms_init:
err_sb_prs_init:
mlxsw_sp_sb_ports_fini(mlxsw_sp);
+err_sb_ports_init:
+ kfree(mlxsw_sp->sb);
return err;
}
@@ -591,6 +645,7 @@ void mlxsw_sp_buffers_fini(struct mlxsw_sp *mlxsw_sp)
{
devlink_sb_unregister(priv_to_devlink(mlxsw_sp->core), 0);
mlxsw_sp_sb_ports_fini(mlxsw_sp);
+ kfree(mlxsw_sp->sb);
}
int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port)
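
The new mlxsw_sp_cells_bytes()/mlxsw_sp_bytes_cells() helpers expose the shared-buffer cell conversion, rounding byte counts up since a partially used cell is still consumed. A userspace sketch under an assumed cell size; the real value comes from the device's CELL_SIZE resource:

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static uint32_t cell_size = 96;	/* hypothetical cell size in bytes */

static uint32_t bytes_to_cells(uint32_t bytes)
{
	/* Round up: a partial cell still occupies a whole cell. */
	return DIV_ROUND_UP(bytes, cell_size);
}

static uint32_t cells_to_bytes(uint32_t cells)
{
	return cells * cell_size;
}

int main(void)
{
	uint32_t cells = bytes_to_cells(1500);

	printf("1500 bytes -> %u cells -> %u bytes\n",
	       cells, cells_to_bytes(cells));
	return 0;
}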
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
index 5f0a7bc692a4..af2c65a3fd9f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
@@ -218,7 +218,7 @@ static int
mlxsw_sp_table_erif_entries_dump(void *priv, bool counters_enabled,
struct devlink_dpipe_dump_ctx *dump_ctx)
{
- struct devlink_dpipe_value match_value = {{0}}, action_value = {{0}};
+ struct devlink_dpipe_value match_value, action_value;
struct devlink_dpipe_action action = {0};
struct devlink_dpipe_match match = {0};
struct devlink_dpipe_entry entry = {0};
@@ -227,6 +227,9 @@ mlxsw_sp_table_erif_entries_dump(void *priv, bool counters_enabled,
int i, j;
int err;
+ memset(&match_value, 0, sizeof(match_value));
+ memset(&action_value, 0, sizeof(action_value));
+
mlxsw_sp_erif_match_action_prepare(&match, &action);
err = mlxsw_sp_erif_entry_prepare(&entry, &match_value, &match,
&action_value, &action);
@@ -242,10 +245,11 @@ start_again:
return err;
j = 0;
for (; i < rif_count; i++) {
- if (!mlxsw_sp->rifs[i])
+ struct mlxsw_sp_rif *rif = mlxsw_sp_rif_by_index(mlxsw_sp, i);
+
+ if (!rif)
continue;
- err = mlxsw_sp_erif_entry_get(mlxsw_sp, &entry,
- mlxsw_sp->rifs[i],
+ err = mlxsw_sp_erif_entry_get(mlxsw_sp, &entry, rif,
counters_enabled);
if (err)
goto err_entry_get;
@@ -282,15 +286,15 @@ static int mlxsw_sp_table_erif_counters_update(void *priv, bool enable)
rtnl_lock();
for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
- if (!mlxsw_sp->rifs[i])
+ struct mlxsw_sp_rif *rif = mlxsw_sp_rif_by_index(mlxsw_sp, i);
+
+ if (!rif)
continue;
if (enable)
- mlxsw_sp_rif_counter_alloc(mlxsw_sp,
- mlxsw_sp->rifs[i],
+ mlxsw_sp_rif_counter_alloc(mlxsw_sp, rif,
MLXSW_SP_RIF_COUNTER_EGRESS);
else
- mlxsw_sp_rif_counter_free(mlxsw_sp,
- mlxsw_sp->rifs[i],
+ mlxsw_sp_rif_counter_free(mlxsw_sp, rif,
MLXSW_SP_RIF_COUNTER_EGRESS);
}
rtnl_unlock();
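
The first dpipe hunk above swaps the {{0}} initializers for memset(). A short standalone illustration of why: when a struct begins with a union, brace initialization only covers the first union member and can trigger missing-braces warnings on some compilers, while memset() zeroes the whole object regardless of layout. The struct shape below is illustrative, not devlink's:

#include <stdio.h>
#include <string.h>

struct value {
	union {
		unsigned long long u64;
		unsigned char raw[16];
	};
	unsigned int mapping_valid;
};

int main(void)
{
	struct value v;

	memset(&v, 0, sizeof(v));	/* zeroes every member, union included */
	printf("u64=%llu valid=%u\n", v.u64, v.mapping_valid);
	return 0;
}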
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
new file mode 100644
index 000000000000..6afbe9ec64e2
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
@@ -0,0 +1,992 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017 Ido Schimmel <idosch@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/if_vlan.h>
+#include <linux/if_bridge.h>
+#include <linux/netdevice.h>
+#include <linux/rtnetlink.h>
+
+#include "spectrum.h"
+#include "reg.h"
+
+struct mlxsw_sp_fid_family;
+
+struct mlxsw_sp_fid_core {
+ struct mlxsw_sp_fid_family *fid_family_arr[MLXSW_SP_FID_TYPE_MAX];
+ unsigned int *port_fid_mappings;
+};
+
+struct mlxsw_sp_fid {
+ struct list_head list;
+ struct mlxsw_sp_rif *rif;
+ unsigned int ref_count;
+ u16 fid_index;
+ struct mlxsw_sp_fid_family *fid_family;
+};
+
+struct mlxsw_sp_fid_8021q {
+ struct mlxsw_sp_fid common;
+ u16 vid;
+};
+
+struct mlxsw_sp_fid_8021d {
+ struct mlxsw_sp_fid common;
+ int br_ifindex;
+};
+
+struct mlxsw_sp_flood_table {
+ enum mlxsw_sp_flood_type packet_type;
+ enum mlxsw_reg_sfgc_bridge_type bridge_type;
+ enum mlxsw_flood_table_type table_type;
+ int table_index;
+};
+
+struct mlxsw_sp_fid_ops {
+ void (*setup)(struct mlxsw_sp_fid *fid, const void *arg);
+ int (*configure)(struct mlxsw_sp_fid *fid);
+ void (*deconfigure)(struct mlxsw_sp_fid *fid);
+ int (*index_alloc)(struct mlxsw_sp_fid *fid, const void *arg,
+ u16 *p_fid_index);
+ bool (*compare)(const struct mlxsw_sp_fid *fid,
+ const void *arg);
+ u16 (*flood_index)(const struct mlxsw_sp_fid *fid);
+ int (*port_vid_map)(struct mlxsw_sp_fid *fid,
+ struct mlxsw_sp_port *port, u16 vid);
+ void (*port_vid_unmap)(struct mlxsw_sp_fid *fid,
+ struct mlxsw_sp_port *port, u16 vid);
+};
+
+struct mlxsw_sp_fid_family {
+ enum mlxsw_sp_fid_type type;
+ size_t fid_size;
+ u16 start_index;
+ u16 end_index;
+ struct list_head fids_list;
+ unsigned long *fids_bitmap;
+ const struct mlxsw_sp_flood_table *flood_tables;
+ int nr_flood_tables;
+ enum mlxsw_sp_rif_type rif_type;
+ const struct mlxsw_sp_fid_ops *ops;
+ struct mlxsw_sp *mlxsw_sp;
+};
+
+static const int mlxsw_sp_sfgc_uc_packet_types[MLXSW_REG_SFGC_TYPE_MAX] = {
+ [MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST] = 1,
+};
+
+static const int mlxsw_sp_sfgc_bc_packet_types[MLXSW_REG_SFGC_TYPE_MAX] = {
+ [MLXSW_REG_SFGC_TYPE_BROADCAST] = 1,
+ [MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV6] = 1,
+ [MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_NON_IP] = 1,
+ [MLXSW_REG_SFGC_TYPE_IPV4_LINK_LOCAL] = 1,
+ [MLXSW_REG_SFGC_TYPE_IPV6_ALL_HOST] = 1,
+};
+
+static const int mlxsw_sp_sfgc_mc_packet_types[MLXSW_REG_SFGC_TYPE_MAX] = {
+ [MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4] = 1,
+};
+
+static const int *mlxsw_sp_packet_type_sfgc_types[] = {
+ [MLXSW_SP_FLOOD_TYPE_UC] = mlxsw_sp_sfgc_uc_packet_types,
+ [MLXSW_SP_FLOOD_TYPE_BC] = mlxsw_sp_sfgc_bc_packet_types,
+ [MLXSW_SP_FLOOD_TYPE_MC] = mlxsw_sp_sfgc_mc_packet_types,
+};
+
+static const struct mlxsw_sp_flood_table *
+mlxsw_sp_fid_flood_table_lookup(const struct mlxsw_sp_fid *fid,
+ enum mlxsw_sp_flood_type packet_type)
+{
+ struct mlxsw_sp_fid_family *fid_family = fid->fid_family;
+ int i;
+
+ for (i = 0; i < fid_family->nr_flood_tables; i++) {
+ if (fid_family->flood_tables[i].packet_type != packet_type)
+ continue;
+ return &fid_family->flood_tables[i];
+ }
+
+ return NULL;
+}
+
+int mlxsw_sp_fid_flood_set(struct mlxsw_sp_fid *fid,
+ enum mlxsw_sp_flood_type packet_type, u8 local_port,
+ bool member)
+{
+ struct mlxsw_sp_fid_family *fid_family = fid->fid_family;
+ const struct mlxsw_sp_fid_ops *ops = fid_family->ops;
+ const struct mlxsw_sp_flood_table *flood_table;
+ char *sftr_pl;
+ int err;
+
+ if (WARN_ON(!fid_family->flood_tables || !ops->flood_index))
+ return -EINVAL;
+
+ flood_table = mlxsw_sp_fid_flood_table_lookup(fid, packet_type);
+ if (!flood_table)
+ return -ESRCH;
+
+ sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
+ if (!sftr_pl)
+ return -ENOMEM;
+
+ mlxsw_reg_sftr_pack(sftr_pl, flood_table->table_index,
+ ops->flood_index(fid), flood_table->table_type, 1,
+ local_port, member);
+ err = mlxsw_reg_write(fid_family->mlxsw_sp->core, MLXSW_REG(sftr),
+ sftr_pl);
+ kfree(sftr_pl);
+ return err;
+}
+
+int mlxsw_sp_fid_port_vid_map(struct mlxsw_sp_fid *fid,
+ struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
+{
+ if (WARN_ON(!fid->fid_family->ops->port_vid_map))
+ return -EINVAL;
+ return fid->fid_family->ops->port_vid_map(fid, mlxsw_sp_port, vid);
+}
+
+void mlxsw_sp_fid_port_vid_unmap(struct mlxsw_sp_fid *fid,
+ struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
+{
+ fid->fid_family->ops->port_vid_unmap(fid, mlxsw_sp_port, vid);
+}
+
+enum mlxsw_sp_rif_type mlxsw_sp_fid_rif_type(const struct mlxsw_sp_fid *fid)
+{
+ return fid->fid_family->rif_type;
+}
+
+u16 mlxsw_sp_fid_index(const struct mlxsw_sp_fid *fid)
+{
+ return fid->fid_index;
+}
+
+enum mlxsw_sp_fid_type mlxsw_sp_fid_type(const struct mlxsw_sp_fid *fid)
+{
+ return fid->fid_family->type;
+}
+
+void mlxsw_sp_fid_rif_set(struct mlxsw_sp_fid *fid, struct mlxsw_sp_rif *rif)
+{
+ fid->rif = rif;
+}
+
+enum mlxsw_sp_rif_type
+mlxsw_sp_fid_type_rif_type(const struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_fid_type type)
+{
+ struct mlxsw_sp_fid_core *fid_core = mlxsw_sp->fid_core;
+
+ return fid_core->fid_family_arr[type]->rif_type;
+}
+
+static struct mlxsw_sp_fid_8021q *
+mlxsw_sp_fid_8021q_fid(const struct mlxsw_sp_fid *fid)
+{
+ return container_of(fid, struct mlxsw_sp_fid_8021q, common);
+}
+
+u16 mlxsw_sp_fid_8021q_vid(const struct mlxsw_sp_fid *fid)
+{
+ return mlxsw_sp_fid_8021q_fid(fid)->vid;
+}
+
+static void mlxsw_sp_fid_8021q_setup(struct mlxsw_sp_fid *fid, const void *arg)
+{
+ u16 vid = *(u16 *) arg;
+
+ mlxsw_sp_fid_8021q_fid(fid)->vid = vid;
+}
+
+static enum mlxsw_reg_sfmr_op mlxsw_sp_sfmr_op(bool valid)
+{
+ return valid ? MLXSW_REG_SFMR_OP_CREATE_FID :
+ MLXSW_REG_SFMR_OP_DESTROY_FID;
+}
+
+static int mlxsw_sp_fid_op(struct mlxsw_sp *mlxsw_sp, u16 fid_index,
+ u16 fid_offset, bool valid)
+{
+ char sfmr_pl[MLXSW_REG_SFMR_LEN];
+
+ mlxsw_reg_sfmr_pack(sfmr_pl, mlxsw_sp_sfmr_op(valid), fid_index,
+ fid_offset);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
+}
+
+static int mlxsw_sp_fid_vid_map(struct mlxsw_sp *mlxsw_sp, u16 fid_index,
+ u16 vid, bool valid)
+{
+ enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_VID_TO_FID;
+ char svfa_pl[MLXSW_REG_SVFA_LEN];
+
+ mlxsw_reg_svfa_pack(svfa_pl, 0, mt, valid, fid_index, vid);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
+}
+
+static int __mlxsw_sp_fid_port_vid_map(struct mlxsw_sp *mlxsw_sp, u16 fid_index,
+ u8 local_port, u16 vid, bool valid)
+{
+ enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
+ char svfa_pl[MLXSW_REG_SVFA_LEN];
+
+ mlxsw_reg_svfa_pack(svfa_pl, local_port, mt, valid, fid_index, vid);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
+}
+
+static int mlxsw_sp_fid_8021q_configure(struct mlxsw_sp_fid *fid)
+{
+ struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
+ struct mlxsw_sp_fid_8021q *fid_8021q;
+ int err;
+
+ err = mlxsw_sp_fid_op(mlxsw_sp, fid->fid_index, fid->fid_index, true);
+ if (err)
+ return err;
+
+ fid_8021q = mlxsw_sp_fid_8021q_fid(fid);
+ err = mlxsw_sp_fid_vid_map(mlxsw_sp, fid->fid_index, fid_8021q->vid,
+ true);
+ if (err)
+ goto err_fid_map;
+
+ return 0;
+
+err_fid_map:
+ mlxsw_sp_fid_op(mlxsw_sp, fid->fid_index, 0, false);
+ return err;
+}
+
+static void mlxsw_sp_fid_8021q_deconfigure(struct mlxsw_sp_fid *fid)
+{
+ struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
+ struct mlxsw_sp_fid_8021q *fid_8021q;
+
+ fid_8021q = mlxsw_sp_fid_8021q_fid(fid);
+ mlxsw_sp_fid_vid_map(mlxsw_sp, fid->fid_index, fid_8021q->vid, false);
+ mlxsw_sp_fid_op(mlxsw_sp, fid->fid_index, 0, false);
+}
+
+static int mlxsw_sp_fid_8021q_index_alloc(struct mlxsw_sp_fid *fid,
+ const void *arg, u16 *p_fid_index)
+{
+ struct mlxsw_sp_fid_family *fid_family = fid->fid_family;
+ u16 vid = *(u16 *) arg;
+
+ /* Use 1:1 mapping for simplicity although not a must */
+ if (vid < fid_family->start_index || vid > fid_family->end_index)
+ return -EINVAL;
+ *p_fid_index = vid;
+
+ return 0;
+}
+
+static bool
+mlxsw_sp_fid_8021q_compare(const struct mlxsw_sp_fid *fid, const void *arg)
+{
+ u16 vid = *(u16 *) arg;
+
+ return mlxsw_sp_fid_8021q_fid(fid)->vid == vid;
+}
+
+static u16 mlxsw_sp_fid_8021q_flood_index(const struct mlxsw_sp_fid *fid)
+{
+ return fid->fid_index;
+}
+
+static int mlxsw_sp_fid_8021q_port_vid_map(struct mlxsw_sp_fid *fid,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 vid)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u8 local_port = mlxsw_sp_port->local_port;
+
+ /* In case there are no {Port, VID} => FID mappings on the port,
+ * we can use the global VID => FID mapping we created when the
+ * FID was configured.
+ */
+ if (mlxsw_sp->fid_core->port_fid_mappings[local_port] == 0)
+ return 0;
+ return __mlxsw_sp_fid_port_vid_map(mlxsw_sp, fid->fid_index, local_port,
+ vid, true);
+}
+
+static void
+mlxsw_sp_fid_8021q_port_vid_unmap(struct mlxsw_sp_fid *fid,
+ struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u8 local_port = mlxsw_sp_port->local_port;
+
+ if (mlxsw_sp->fid_core->port_fid_mappings[local_port] == 0)
+ return;
+ __mlxsw_sp_fid_port_vid_map(mlxsw_sp, fid->fid_index, local_port, vid,
+ false);
+}
+
+static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_8021q_ops = {
+ .setup = mlxsw_sp_fid_8021q_setup,
+ .configure = mlxsw_sp_fid_8021q_configure,
+ .deconfigure = mlxsw_sp_fid_8021q_deconfigure,
+ .index_alloc = mlxsw_sp_fid_8021q_index_alloc,
+ .compare = mlxsw_sp_fid_8021q_compare,
+ .flood_index = mlxsw_sp_fid_8021q_flood_index,
+ .port_vid_map = mlxsw_sp_fid_8021q_port_vid_map,
+ .port_vid_unmap = mlxsw_sp_fid_8021q_port_vid_unmap,
+};
+
+static const struct mlxsw_sp_flood_table mlxsw_sp_fid_8021q_flood_tables[] = {
+ {
+ .packet_type = MLXSW_SP_FLOOD_TYPE_UC,
+ .bridge_type = MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
+ .table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFSET,
+ .table_index = 0,
+ },
+ {
+ .packet_type = MLXSW_SP_FLOOD_TYPE_MC,
+ .bridge_type = MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
+ .table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFSET,
+ .table_index = 1,
+ },
+ {
+ .packet_type = MLXSW_SP_FLOOD_TYPE_BC,
+ .bridge_type = MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
+ .table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFSET,
+ .table_index = 2,
+ },
+};
+
+/* Range and flood configuration must match mlxsw_config_profile */
+static const struct mlxsw_sp_fid_family mlxsw_sp_fid_8021q_family = {
+ .type = MLXSW_SP_FID_TYPE_8021Q,
+ .fid_size = sizeof(struct mlxsw_sp_fid_8021q),
+ .start_index = 1,
+ .end_index = VLAN_VID_MASK,
+ .flood_tables = mlxsw_sp_fid_8021q_flood_tables,
+ .nr_flood_tables = ARRAY_SIZE(mlxsw_sp_fid_8021q_flood_tables),
+ .rif_type = MLXSW_SP_RIF_TYPE_VLAN,
+ .ops = &mlxsw_sp_fid_8021q_ops,
+};
+
+static struct mlxsw_sp_fid_8021d *
+mlxsw_sp_fid_8021d_fid(const struct mlxsw_sp_fid *fid)
+{
+ return container_of(fid, struct mlxsw_sp_fid_8021d, common);
+}
+
+static void mlxsw_sp_fid_8021d_setup(struct mlxsw_sp_fid *fid, const void *arg)
+{
+ int br_ifindex = *(int *) arg;
+
+ mlxsw_sp_fid_8021d_fid(fid)->br_ifindex = br_ifindex;
+}
+
+static int mlxsw_sp_fid_8021d_configure(struct mlxsw_sp_fid *fid)
+{
+ struct mlxsw_sp_fid_family *fid_family = fid->fid_family;
+
+ return mlxsw_sp_fid_op(fid_family->mlxsw_sp, fid->fid_index, 0, true);
+}
+
+static void mlxsw_sp_fid_8021d_deconfigure(struct mlxsw_sp_fid *fid)
+{
+ mlxsw_sp_fid_op(fid->fid_family->mlxsw_sp, fid->fid_index, 0, false);
+}
+
+static int mlxsw_sp_fid_8021d_index_alloc(struct mlxsw_sp_fid *fid,
+ const void *arg, u16 *p_fid_index)
+{
+ struct mlxsw_sp_fid_family *fid_family = fid->fid_family;
+ u16 nr_fids, fid_index;
+
+ nr_fids = fid_family->end_index - fid_family->start_index + 1;
+ fid_index = find_first_zero_bit(fid_family->fids_bitmap, nr_fids);
+ if (fid_index == nr_fids)
+ return -ENOBUFS;
+ *p_fid_index = fid_family->start_index + fid_index;
+
+ return 0;
+}
+
+static bool
+mlxsw_sp_fid_8021d_compare(const struct mlxsw_sp_fid *fid, const void *arg)
+{
+ int br_ifindex = *(int *) arg;
+
+ return mlxsw_sp_fid_8021d_fid(fid)->br_ifindex == br_ifindex;
+}
+
+static u16 mlxsw_sp_fid_8021d_flood_index(const struct mlxsw_sp_fid *fid)
+{
+ return fid->fid_index - fid->fid_family->start_index;
+}
+
+static int mlxsw_sp_port_vp_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
+ int err;
+
+ list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
+ list) {
+ struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
+ u16 vid = mlxsw_sp_port_vlan->vid;
+
+ if (!fid)
+ continue;
+
+ err = __mlxsw_sp_fid_port_vid_map(mlxsw_sp, fid->fid_index,
+ mlxsw_sp_port->local_port,
+ vid, true);
+ if (err)
+ goto err_fid_port_vid_map;
+ }
+
+ err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
+ if (err)
+ goto err_port_vp_mode_set;
+
+ return 0;
+
+err_port_vp_mode_set:
+err_fid_port_vid_map:
+ list_for_each_entry_continue_reverse(mlxsw_sp_port_vlan,
+ &mlxsw_sp_port->vlans_list, list) {
+ struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
+ u16 vid = mlxsw_sp_port_vlan->vid;
+
+ if (!fid)
+ continue;
+
+ __mlxsw_sp_fid_port_vid_map(mlxsw_sp, fid->fid_index,
+ mlxsw_sp_port->local_port, vid,
+ false);
+ }
+ return err;
+}
+
+static void mlxsw_sp_port_vlan_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
+
+ mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
+
+ list_for_each_entry_reverse(mlxsw_sp_port_vlan,
+ &mlxsw_sp_port->vlans_list, list) {
+ struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
+ u16 vid = mlxsw_sp_port_vlan->vid;
+
+ if (!fid)
+ continue;
+
+ __mlxsw_sp_fid_port_vid_map(mlxsw_sp, fid->fid_index,
+ mlxsw_sp_port->local_port, vid,
+ false);
+ }
+}
+
+static int mlxsw_sp_fid_8021d_port_vid_map(struct mlxsw_sp_fid *fid,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 vid)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u8 local_port = mlxsw_sp_port->local_port;
+ int err;
+
+ err = __mlxsw_sp_fid_port_vid_map(mlxsw_sp, fid->fid_index,
+ mlxsw_sp_port->local_port, vid, true);
+ if (err)
+ return err;
+
+ if (mlxsw_sp->fid_core->port_fid_mappings[local_port]++ == 0) {
+ err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port);
+ if (err)
+ goto err_port_vp_mode_trans;
+ }
+
+ return 0;
+
+err_port_vp_mode_trans:
+ mlxsw_sp->fid_core->port_fid_mappings[local_port]--;
+ __mlxsw_sp_fid_port_vid_map(mlxsw_sp, fid->fid_index,
+ mlxsw_sp_port->local_port, vid, false);
+ return err;
+}
+
+static void
+mlxsw_sp_fid_8021d_port_vid_unmap(struct mlxsw_sp_fid *fid,
+ struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u8 local_port = mlxsw_sp_port->local_port;
+
+ if (mlxsw_sp->fid_core->port_fid_mappings[local_port] == 1)
+ mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
+ mlxsw_sp->fid_core->port_fid_mappings[local_port]--;
+ __mlxsw_sp_fid_port_vid_map(mlxsw_sp, fid->fid_index,
+ mlxsw_sp_port->local_port, vid, false);
+}
+
+static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_8021d_ops = {
+ .setup = mlxsw_sp_fid_8021d_setup,
+ .configure = mlxsw_sp_fid_8021d_configure,
+ .deconfigure = mlxsw_sp_fid_8021d_deconfigure,
+ .index_alloc = mlxsw_sp_fid_8021d_index_alloc,
+ .compare = mlxsw_sp_fid_8021d_compare,
+ .flood_index = mlxsw_sp_fid_8021d_flood_index,
+ .port_vid_map = mlxsw_sp_fid_8021d_port_vid_map,
+ .port_vid_unmap = mlxsw_sp_fid_8021d_port_vid_unmap,
+};
+
+static const struct mlxsw_sp_flood_table mlxsw_sp_fid_8021d_flood_tables[] = {
+ {
+ .packet_type = MLXSW_SP_FLOOD_TYPE_UC,
+ .bridge_type = MLXSW_REG_SFGC_BRIDGE_TYPE_VFID,
+ .table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID,
+ .table_index = 0,
+ },
+ {
+ .packet_type = MLXSW_SP_FLOOD_TYPE_MC,
+ .bridge_type = MLXSW_REG_SFGC_BRIDGE_TYPE_VFID,
+ .table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID,
+ .table_index = 1,
+ },
+ {
+ .packet_type = MLXSW_SP_FLOOD_TYPE_BC,
+ .bridge_type = MLXSW_REG_SFGC_BRIDGE_TYPE_VFID,
+ .table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID,
+ .table_index = 2,
+ },
+};
+
+/* Range and flood configuration must match mlxsw_config_profile */
+static const struct mlxsw_sp_fid_family mlxsw_sp_fid_8021d_family = {
+ .type = MLXSW_SP_FID_TYPE_8021D,
+ .fid_size = sizeof(struct mlxsw_sp_fid_8021d),
+ .start_index = VLAN_N_VID,
+ .end_index = VLAN_N_VID + MLXSW_SP_FID_8021D_MAX - 1,
+ .flood_tables = mlxsw_sp_fid_8021d_flood_tables,
+ .nr_flood_tables = ARRAY_SIZE(mlxsw_sp_fid_8021d_flood_tables),
+ .rif_type = MLXSW_SP_RIF_TYPE_FID,
+ .ops = &mlxsw_sp_fid_8021d_ops,
+};
+
+static int mlxsw_sp_fid_rfid_configure(struct mlxsw_sp_fid *fid)
+{
+ /* rFIDs are allocated by the device during init */
+ return 0;
+}
+
+static void mlxsw_sp_fid_rfid_deconfigure(struct mlxsw_sp_fid *fid)
+{
+}
+
+static int mlxsw_sp_fid_rfid_index_alloc(struct mlxsw_sp_fid *fid,
+ const void *arg, u16 *p_fid_index)
+{
+ u16 rif_index = *(u16 *) arg;
+
+ *p_fid_index = fid->fid_family->start_index + rif_index;
+
+ return 0;
+}
+
+static bool mlxsw_sp_fid_rfid_compare(const struct mlxsw_sp_fid *fid,
+ const void *arg)
+{
+ u16 rif_index = *(u16 *) arg;
+
+ return fid->fid_index == rif_index + fid->fid_family->start_index;
+}
+
+static int mlxsw_sp_fid_rfid_port_vid_map(struct mlxsw_sp_fid *fid,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 vid)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u8 local_port = mlxsw_sp_port->local_port;
+ int err;
+
+ /* We only need to transition the port to virtual mode since
+ * {Port, VID} => FID is done by the firmware upon RIF creation.
+ */
+ if (mlxsw_sp->fid_core->port_fid_mappings[local_port]++ == 0) {
+ err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port);
+ if (err)
+ goto err_port_vp_mode_trans;
+ }
+
+ return 0;
+
+err_port_vp_mode_trans:
+ mlxsw_sp->fid_core->port_fid_mappings[local_port]--;
+ return err;
+}
+
+static void
+mlxsw_sp_fid_rfid_port_vid_unmap(struct mlxsw_sp_fid *fid,
+ struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u8 local_port = mlxsw_sp_port->local_port;
+
+ if (mlxsw_sp->fid_core->port_fid_mappings[local_port] == 1)
+ mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
+ mlxsw_sp->fid_core->port_fid_mappings[local_port]--;
+}
+
+static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_rfid_ops = {
+ .configure = mlxsw_sp_fid_rfid_configure,
+ .deconfigure = mlxsw_sp_fid_rfid_deconfigure,
+ .index_alloc = mlxsw_sp_fid_rfid_index_alloc,
+ .compare = mlxsw_sp_fid_rfid_compare,
+ .port_vid_map = mlxsw_sp_fid_rfid_port_vid_map,
+ .port_vid_unmap = mlxsw_sp_fid_rfid_port_vid_unmap,
+};
+
+#define MLXSW_SP_RFID_BASE (15 * 1024)
+#define MLXSW_SP_RFID_MAX 1024
+
+static const struct mlxsw_sp_fid_family mlxsw_sp_fid_rfid_family = {
+ .type = MLXSW_SP_FID_TYPE_RFID,
+ .fid_size = sizeof(struct mlxsw_sp_fid),
+ .start_index = MLXSW_SP_RFID_BASE,
+ .end_index = MLXSW_SP_RFID_BASE + MLXSW_SP_RFID_MAX - 1,
+ .rif_type = MLXSW_SP_RIF_TYPE_SUBPORT,
+ .ops = &mlxsw_sp_fid_rfid_ops,
+};
+
+static int mlxsw_sp_fid_dummy_configure(struct mlxsw_sp_fid *fid)
+{
+ struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
+
+ return mlxsw_sp_fid_op(mlxsw_sp, fid->fid_index, 0, true);
+}
+
+static void mlxsw_sp_fid_dummy_deconfigure(struct mlxsw_sp_fid *fid)
+{
+ mlxsw_sp_fid_op(fid->fid_family->mlxsw_sp, fid->fid_index, 0, false);
+}
+
+static int mlxsw_sp_fid_dummy_index_alloc(struct mlxsw_sp_fid *fid,
+ const void *arg, u16 *p_fid_index)
+{
+ *p_fid_index = fid->fid_family->start_index;
+
+ return 0;
+}
+
+static bool mlxsw_sp_fid_dummy_compare(const struct mlxsw_sp_fid *fid,
+ const void *arg)
+{
+ return true;
+}
+
+static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_dummy_ops = {
+ .configure = mlxsw_sp_fid_dummy_configure,
+ .deconfigure = mlxsw_sp_fid_dummy_deconfigure,
+ .index_alloc = mlxsw_sp_fid_dummy_index_alloc,
+ .compare = mlxsw_sp_fid_dummy_compare,
+};
+
+static const struct mlxsw_sp_fid_family mlxsw_sp_fid_dummy_family = {
+ .type = MLXSW_SP_FID_TYPE_DUMMY,
+ .fid_size = sizeof(struct mlxsw_sp_fid),
+ .start_index = MLXSW_SP_RFID_BASE - 1,
+ .end_index = MLXSW_SP_RFID_BASE - 1,
+ .ops = &mlxsw_sp_fid_dummy_ops,
+};
+
+static const struct mlxsw_sp_fid_family *mlxsw_sp_fid_family_arr[] = {
+ [MLXSW_SP_FID_TYPE_8021Q] = &mlxsw_sp_fid_8021q_family,
+ [MLXSW_SP_FID_TYPE_8021D] = &mlxsw_sp_fid_8021d_family,
+ [MLXSW_SP_FID_TYPE_RFID] = &mlxsw_sp_fid_rfid_family,
+ [MLXSW_SP_FID_TYPE_DUMMY] = &mlxsw_sp_fid_dummy_family,
+};
+
+static struct mlxsw_sp_fid *mlxsw_sp_fid_get(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_fid_type type,
+ const void *arg)
+{
+ struct mlxsw_sp_fid_family *fid_family;
+ struct mlxsw_sp_fid *fid;
+ u16 fid_index;
+ int err;
+
+ fid_family = mlxsw_sp->fid_core->fid_family_arr[type];
+ list_for_each_entry(fid, &fid_family->fids_list, list) {
+ if (!fid->fid_family->ops->compare(fid, arg))
+ continue;
+ fid->ref_count++;
+ return fid;
+ }
+
+ fid = kzalloc(fid_family->fid_size, GFP_KERNEL);
+ if (!fid)
+ return ERR_PTR(-ENOMEM);
+ fid->fid_family = fid_family;
+
+ err = fid->fid_family->ops->index_alloc(fid, arg, &fid_index);
+ if (err)
+ goto err_index_alloc;
+ fid->fid_index = fid_index;
+ __set_bit(fid_index - fid_family->start_index, fid_family->fids_bitmap);
+
+ if (fid->fid_family->ops->setup)
+ fid->fid_family->ops->setup(fid, arg);
+
+ err = fid->fid_family->ops->configure(fid);
+ if (err)
+ goto err_configure;
+
+ list_add(&fid->list, &fid_family->fids_list);
+ fid->ref_count++;
+ return fid;
+
+err_configure:
+ __clear_bit(fid_index - fid_family->start_index,
+ fid_family->fids_bitmap);
+err_index_alloc:
+ kfree(fid);
+ return ERR_PTR(err);
+}
+
+void mlxsw_sp_fid_put(struct mlxsw_sp_fid *fid)
+{
+ struct mlxsw_sp_fid_family *fid_family = fid->fid_family;
+
+ if (--fid->ref_count == 1 && fid->rif) {
+ /* Destroy the associated RIF and let it drop the last
+ * reference on the FID.
+ */
+ return mlxsw_sp_rif_destroy(fid->rif);
+ } else if (fid->ref_count == 0) {
+ list_del(&fid->list);
+ fid->fid_family->ops->deconfigure(fid);
+ __clear_bit(fid->fid_index - fid_family->start_index,
+ fid_family->fids_bitmap);
+ kfree(fid);
+ }
+}
+
+struct mlxsw_sp_fid *mlxsw_sp_fid_8021q_get(struct mlxsw_sp *mlxsw_sp, u16 vid)
+{
+ return mlxsw_sp_fid_get(mlxsw_sp, MLXSW_SP_FID_TYPE_8021Q, &vid);
+}
+
+struct mlxsw_sp_fid *mlxsw_sp_fid_8021d_get(struct mlxsw_sp *mlxsw_sp,
+ int br_ifindex)
+{
+ return mlxsw_sp_fid_get(mlxsw_sp, MLXSW_SP_FID_TYPE_8021D, &br_ifindex);
+}
+
+struct mlxsw_sp_fid *mlxsw_sp_fid_rfid_get(struct mlxsw_sp *mlxsw_sp,
+ u16 rif_index)
+{
+ return mlxsw_sp_fid_get(mlxsw_sp, MLXSW_SP_FID_TYPE_RFID, &rif_index);
+}
+
+struct mlxsw_sp_fid *mlxsw_sp_fid_dummy_get(struct mlxsw_sp *mlxsw_sp)
+{
+ return mlxsw_sp_fid_get(mlxsw_sp, MLXSW_SP_FID_TYPE_DUMMY, NULL);
+}
+
+static int
+mlxsw_sp_fid_flood_table_init(struct mlxsw_sp_fid_family *fid_family,
+ const struct mlxsw_sp_flood_table *flood_table)
+{
+ enum mlxsw_sp_flood_type packet_type = flood_table->packet_type;
+ const int *sfgc_packet_types;
+ int i;
+
+ sfgc_packet_types = mlxsw_sp_packet_type_sfgc_types[packet_type];
+ for (i = 0; i < MLXSW_REG_SFGC_TYPE_MAX; i++) {
+ struct mlxsw_sp *mlxsw_sp = fid_family->mlxsw_sp;
+ char sfgc_pl[MLXSW_REG_SFGC_LEN];
+ int err;
+
+ if (!sfgc_packet_types[i])
+ continue;
+ mlxsw_reg_sfgc_pack(sfgc_pl, i, flood_table->bridge_type,
+ flood_table->table_type,
+ flood_table->table_index);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfgc), sfgc_pl);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int
+mlxsw_sp_fid_flood_tables_init(struct mlxsw_sp_fid_family *fid_family)
+{
+ int i;
+
+ for (i = 0; i < fid_family->nr_flood_tables; i++) {
+ const struct mlxsw_sp_flood_table *flood_table;
+ int err;
+
+ flood_table = &fid_family->flood_tables[i];
+ err = mlxsw_sp_fid_flood_table_init(fid_family, flood_table);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int mlxsw_sp_fid_family_register(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_fid_family *tmpl)
+{
+ u16 nr_fids = tmpl->end_index - tmpl->start_index + 1;
+ struct mlxsw_sp_fid_family *fid_family;
+ int err;
+
+ fid_family = kmemdup(tmpl, sizeof(*fid_family), GFP_KERNEL);
+ if (!fid_family)
+ return -ENOMEM;
+
+ fid_family->mlxsw_sp = mlxsw_sp;
+ INIT_LIST_HEAD(&fid_family->fids_list);
+ fid_family->fids_bitmap = kcalloc(BITS_TO_LONGS(nr_fids),
+ sizeof(unsigned long), GFP_KERNEL);
+ if (!fid_family->fids_bitmap) {
+ err = -ENOMEM;
+ goto err_alloc_fids_bitmap;
+ }
+
+ if (fid_family->flood_tables) {
+ err = mlxsw_sp_fid_flood_tables_init(fid_family);
+ if (err)
+ goto err_fid_flood_tables_init;
+ }
+
+ mlxsw_sp->fid_core->fid_family_arr[tmpl->type] = fid_family;
+
+ return 0;
+
+err_fid_flood_tables_init:
+ kfree(fid_family->fids_bitmap);
+err_alloc_fids_bitmap:
+ kfree(fid_family);
+ return err;
+}
+
+static void
+mlxsw_sp_fid_family_unregister(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fid_family *fid_family)
+{
+ mlxsw_sp->fid_core->fid_family_arr[fid_family->type] = NULL;
+ kfree(fid_family->fids_bitmap);
+ WARN_ON_ONCE(!list_empty(&fid_family->fids_list));
+ kfree(fid_family);
+}
+
+int mlxsw_sp_port_fids_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+
+ /* Track number of FIDs configured on the port with mapping type
+ * PORT_VID_TO_FID, so that we know when to transition the port
+ * back to non-virtual (VLAN) mode.
+ */
+ mlxsw_sp->fid_core->port_fid_mappings[mlxsw_sp_port->local_port] = 0;
+
+ return mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
+}
+
+void mlxsw_sp_port_fids_fini(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+
+ mlxsw_sp->fid_core->port_fid_mappings[mlxsw_sp_port->local_port] = 0;
+}
+
+int mlxsw_sp_fids_init(struct mlxsw_sp *mlxsw_sp)
+{
+ unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
+ struct mlxsw_sp_fid_core *fid_core;
+ int err, i;
+
+ fid_core = kzalloc(sizeof(*mlxsw_sp->fid_core), GFP_KERNEL);
+ if (!fid_core)
+ return -ENOMEM;
+ mlxsw_sp->fid_core = fid_core;
+
+ fid_core->port_fid_mappings = kcalloc(max_ports, sizeof(unsigned int),
+ GFP_KERNEL);
+ if (!fid_core->port_fid_mappings) {
+ err = -ENOMEM;
+ goto err_alloc_port_fid_mappings;
+ }
+
+ for (i = 0; i < MLXSW_SP_FID_TYPE_MAX; i++) {
+ err = mlxsw_sp_fid_family_register(mlxsw_sp,
+ mlxsw_sp_fid_family_arr[i]);
+
+ if (err)
+ goto err_fid_ops_register;
+ }
+
+ return 0;
+
+err_fid_ops_register:
+ for (i--; i >= 0; i--) {
+ struct mlxsw_sp_fid_family *fid_family;
+
+ fid_family = fid_core->fid_family_arr[i];
+ mlxsw_sp_fid_family_unregister(mlxsw_sp, fid_family);
+ }
+ kfree(fid_core->port_fid_mappings);
+err_alloc_port_fid_mappings:
+ kfree(fid_core);
+ return err;
+}
+
+void mlxsw_sp_fids_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_sp_fid_core *fid_core = mlxsw_sp->fid_core;
+ int i;
+
+ for (i = 0; i < MLXSW_SP_FID_TYPE_MAX; i++)
+ mlxsw_sp_fid_family_unregister(mlxsw_sp,
+ fid_core->fid_family_arr[i]);
+ kfree(fid_core->port_fid_mappings);
+ kfree(fid_core);
+}
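
spectrum_fid.c centers on the mlxsw_sp_fid_get()/mlxsw_sp_fid_put() lifetime: a lookup hit on the family's fids_list only bumps ref_count, a miss allocates and configures, and the last put deconfigures and frees. A minimal userspace model of that lifetime, with locking, the RIF interaction, and hardware writes omitted and all names hypothetical:

#include <stdio.h>
#include <stdlib.h>

struct fid {
	int key;
	unsigned int ref_count;
};

static struct fid *cached;	/* stand-in for the family's fids_list */

static struct fid *fid_get(int key)
{
	if (cached && cached->key == key) {
		cached->ref_count++;	/* lookup hit: share the object */
		return cached;
	}
	cached = calloc(1, sizeof(*cached));
	if (!cached)
		return NULL;
	cached->key = key;
	cached->ref_count = 1;
	return cached;
}

static void fid_put(struct fid *fid)
{
	if (--fid->ref_count == 0) {	/* last put frees */
		if (cached == fid)
			cached = NULL;
		free(fid);
	}
}

int main(void)
{
	struct fid *a = fid_get(100);
	struct fid *b;

	if (!a)
		return 1;
	b = fid_get(100);	/* same object, ref_count == 2 */
	printf("shared=%d refs=%u\n", a == b, a->ref_count);
	fid_put(b);
	fid_put(a);
	return 0;
}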
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
index 7d87e23578a3..21bb2bf62d3e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
@@ -67,12 +67,20 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
err = mlxsw_sp_acl_rulei_act_drop(rulei);
if (err)
return err;
+ } else if (is_tcf_gact_trap(a)) {
+ err = mlxsw_sp_acl_rulei_act_trap(rulei);
+ if (err)
+ return err;
} else if (is_tcf_mirred_egress_redirect(a)) {
int ifindex = tcf_mirred_ifindex(a);
struct net_device *out_dev;
+ struct mlxsw_sp_fid *fid;
+ u16 fid_index;
+ fid = mlxsw_sp_acl_dummy_fid(mlxsw_sp);
+ fid_index = mlxsw_sp_fid_index(fid);
err = mlxsw_sp_acl_rulei_act_fid_set(mlxsw_sp, rulei,
- MLXSW_SP_DUMMY_FID);
+ fid_index);
if (err)
return err;
@@ -178,6 +186,32 @@ static int mlxsw_sp_flower_parse_ports(struct mlxsw_sp *mlxsw_sp,
return 0;
}
+static int mlxsw_sp_flower_parse_tcp(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ struct tc_cls_flower_offload *f,
+ u8 ip_proto)
+{
+ struct flow_dissector_key_tcp *key, *mask;
+
+ if (!dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_TCP))
+ return 0;
+
+ if (ip_proto != IPPROTO_TCP) {
+ dev_err(mlxsw_sp->bus_info->dev, "TCP keys supported only for TCP\n");
+ return -EINVAL;
+ }
+
+ key = skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_TCP,
+ f->key);
+ mask = skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_TCP,
+ f->mask);
+ mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_TCP_FLAGS,
+ ntohs(key->flags), ntohs(mask->flags));
+ return 0;
+}
+
static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
struct net_device *dev,
struct mlxsw_sp_acl_rule_info *rulei,
@@ -194,6 +228,7 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
BIT(FLOW_DISSECTOR_KEY_PORTS) |
+ BIT(FLOW_DISSECTOR_KEY_TCP) |
BIT(FLOW_DISSECTOR_KEY_VLAN))) {
dev_err(mlxsw_sp->bus_info->dev, "Unsupported key\n");
return -EOPNOTSUPP;
@@ -285,6 +320,9 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
err = mlxsw_sp_flower_parse_ports(mlxsw_sp, rulei, f, ip_proto);
if (err)
return err;
+ err = mlxsw_sp_flower_parse_tcp(mlxsw_sp, rulei, f, ip_proto);
+ if (err)
+ return err;
return mlxsw_sp_flower_parse_actions(mlxsw_sp, dev, rulei, f->exts);
}
@@ -363,8 +401,6 @@ int mlxsw_sp_flower_stats(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_acl_ruleset *ruleset;
struct mlxsw_sp_acl_rule *rule;
- struct tc_action *a;
- LIST_HEAD(actions);
u64 packets;
u64 lastuse;
u64 bytes;
@@ -385,13 +421,7 @@ int mlxsw_sp_flower_stats(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
if (err)
goto err_rule_get_stats;
- preempt_disable();
-
- tcf_exts_to_list(f->exts, &actions);
- list_for_each_entry(a, &actions, list)
- tcf_action_stats_update(a, bytes, packets, lastuse);
-
- preempt_enable();
+ tcf_exts_stats_update(f->exts, bytes, packets, lastuse);
mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
return 0;
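
The TCP-flags key programmed by mlxsw_sp_flower_parse_tcp() follows the usual key/mask semantics: a packet matches when its masked flag bits equal the masked key. A standalone sketch, with TH_* values taken from the standard TCP header bit positions:

#include <stdint.h>
#include <stdio.h>

#define TH_SYN 0x02
#define TH_ACK 0x10

/* Match when the masked packet bits equal the masked key bits. */
static int flags_match(uint16_t pkt, uint16_t key, uint16_t mask)
{
	return (pkt & mask) == (key & mask);
}

int main(void)
{
	uint16_t key = TH_SYN, mask = TH_SYN | TH_ACK;	/* SYN set, ACK clear */

	printf("SYN      -> %d\n", flags_match(TH_SYN, key, mask));
	printf("SYN|ACK  -> %d\n", flags_match(TH_SYN | TH_ACK, key, mask));
	return 0;
}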
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 0744452a0b18..192cb93e7669 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -42,6 +42,7 @@
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
+#include <linux/if_bridge.h>
#include <net/netevent.h>
#include <net/neighbour.h>
#include <net/arp.h>
@@ -56,21 +57,82 @@
#include "spectrum_dpipe.h"
#include "spectrum_router.h"
+struct mlxsw_sp_vr;
+struct mlxsw_sp_lpm_tree;
+struct mlxsw_sp_rif_ops;
+
+struct mlxsw_sp_router {
+ struct mlxsw_sp *mlxsw_sp;
+ struct mlxsw_sp_rif **rifs;
+ struct mlxsw_sp_vr *vrs;
+ struct rhashtable neigh_ht;
+ struct rhashtable nexthop_group_ht;
+ struct rhashtable nexthop_ht;
+ struct {
+ struct mlxsw_sp_lpm_tree *trees;
+ unsigned int tree_count;
+ } lpm;
+ struct {
+ struct delayed_work dw;
+ unsigned long interval; /* ms */
+ } neighs_update;
+ struct delayed_work nexthop_probe_dw;
+#define MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL 5000 /* ms */
+ struct list_head nexthop_neighs_list;
+ bool aborted;
+ struct notifier_block fib_nb;
+ const struct mlxsw_sp_rif_ops **rif_ops_arr;
+};
+
struct mlxsw_sp_rif {
struct list_head nexthop_list;
struct list_head neigh_list;
struct net_device *dev;
- struct mlxsw_sp_fid *f;
+ struct mlxsw_sp_fid *fid;
unsigned char addr[ETH_ALEN];
int mtu;
u16 rif_index;
u16 vr_id;
+ const struct mlxsw_sp_rif_ops *ops;
+ struct mlxsw_sp *mlxsw_sp;
+
unsigned int counter_ingress;
bool counter_ingress_valid;
unsigned int counter_egress;
bool counter_egress_valid;
};
+struct mlxsw_sp_rif_params {
+ struct net_device *dev;
+ union {
+ u16 system_port;
+ u16 lag_id;
+ };
+ u16 vid;
+ bool lag;
+};
+
+struct mlxsw_sp_rif_subport {
+ struct mlxsw_sp_rif common;
+ union {
+ u16 system_port;
+ u16 lag_id;
+ };
+ u16 vid;
+ bool lag;
+};
+
+struct mlxsw_sp_rif_ops {
+ enum mlxsw_sp_rif_type type;
+ size_t rif_size;
+
+ void (*setup)(struct mlxsw_sp_rif *rif,
+ const struct mlxsw_sp_rif_params *params);
+ int (*configure)(struct mlxsw_sp_rif *rif);
+ void (*deconfigure)(struct mlxsw_sp_rif *rif);
+ struct mlxsw_sp_fid * (*fid_get)(struct mlxsw_sp_rif *rif);
+};
+
static unsigned int *
mlxsw_sp_rif_p_counter_get(struct mlxsw_sp_rif *rif,
enum mlxsw_sp_rif_counter_dir dir)
@@ -219,10 +281,35 @@ void mlxsw_sp_rif_counter_free(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_rif_counter_valid_set(rif, dir, false);
}
+static void mlxsw_sp_rif_counters_alloc(struct mlxsw_sp_rif *rif)
+{
+ struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
+ struct devlink *devlink;
+
+ devlink = priv_to_devlink(mlxsw_sp->core);
+ if (!devlink_dpipe_table_counter_enabled(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_ERIF))
+ return;
+ mlxsw_sp_rif_counter_alloc(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
+}
+
+static void mlxsw_sp_rif_counters_free(struct mlxsw_sp_rif *rif)
+{
+ struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
+
+ mlxsw_sp_rif_counter_free(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
+}
+
static struct mlxsw_sp_rif *
mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
const struct net_device *dev);
+#define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE)
+
+struct mlxsw_sp_prefix_usage {
+ DECLARE_BITMAP(b, MLXSW_SP_PREFIX_COUNT);
+};
+
#define mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) \
for_each_set_bit(prefix, (prefix_usage)->b, MLXSW_SP_PREFIX_COUNT)
@@ -287,6 +374,7 @@ enum mlxsw_sp_fib_entry_type {
};
struct mlxsw_sp_nexthop_group;
+struct mlxsw_sp_fib;
struct mlxsw_sp_fib_node {
struct list_head entry_list;
@@ -313,6 +401,18 @@ struct mlxsw_sp_fib_entry {
bool offloaded;
};
+enum mlxsw_sp_l3proto {
+ MLXSW_SP_L3_PROTO_IPV4,
+ MLXSW_SP_L3_PROTO_IPV6,
+};
+
+struct mlxsw_sp_lpm_tree {
+ u8 id; /* tree ID */
+ unsigned int ref_count;
+ enum mlxsw_sp_l3proto proto;
+ struct mlxsw_sp_prefix_usage prefix_usage;
+};
+
struct mlxsw_sp_fib {
struct rhashtable ht;
struct list_head node_list;
@@ -323,6 +423,13 @@ struct mlxsw_sp_fib {
enum mlxsw_sp_l3proto proto;
};
+struct mlxsw_sp_vr {
+ u16 id; /* virtual router ID */
+ u32 tb_id; /* kernel fib table id */
+ unsigned int rif_count;
+ struct mlxsw_sp_fib *fib4;
+};
+
static const struct rhashtable_params mlxsw_sp_fib_ht_params;
static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp_vr *vr,
@@ -361,8 +468,8 @@ mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp)
static struct mlxsw_sp_lpm_tree *lpm_tree;
int i;
- for (i = 0; i < mlxsw_sp->router.lpm.tree_count; i++) {
- lpm_tree = &mlxsw_sp->router.lpm.trees[i];
+ for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
+ lpm_tree = &mlxsw_sp->router->lpm.trees[i];
if (lpm_tree->ref_count == 0)
return lpm_tree;
}
@@ -458,8 +565,8 @@ mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_lpm_tree *lpm_tree;
int i;
- for (i = 0; i < mlxsw_sp->router.lpm.tree_count; i++) {
- lpm_tree = &mlxsw_sp->router.lpm.trees[i];
+ for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
+ lpm_tree = &mlxsw_sp->router->lpm.trees[i];
if (lpm_tree->ref_count != 0 &&
lpm_tree->proto == proto &&
mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage,
@@ -484,7 +591,7 @@ static int mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
return 0;
}
-#define MLXSW_SP_LPM_TREE_MIN 2 /* trees 0 and 1 are reserved */
+#define MLXSW_SP_LPM_TREE_MIN 1 /* tree 0 is reserved */
static int mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
{
@@ -496,15 +603,15 @@ static int mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
return -EIO;
max_trees = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LPM_TREES);
- mlxsw_sp->router.lpm.tree_count = max_trees - MLXSW_SP_LPM_TREE_MIN;
- mlxsw_sp->router.lpm.trees = kcalloc(mlxsw_sp->router.lpm.tree_count,
+ mlxsw_sp->router->lpm.tree_count = max_trees - MLXSW_SP_LPM_TREE_MIN;
+ mlxsw_sp->router->lpm.trees = kcalloc(mlxsw_sp->router->lpm.tree_count,
sizeof(struct mlxsw_sp_lpm_tree),
GFP_KERNEL);
- if (!mlxsw_sp->router.lpm.trees)
+ if (!mlxsw_sp->router->lpm.trees)
return -ENOMEM;
- for (i = 0; i < mlxsw_sp->router.lpm.tree_count; i++) {
- lpm_tree = &mlxsw_sp->router.lpm.trees[i];
+ for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
+ lpm_tree = &mlxsw_sp->router->lpm.trees[i];
lpm_tree->id = i + MLXSW_SP_LPM_TREE_MIN;
}
@@ -513,7 +620,7 @@ static int mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
static void mlxsw_sp_lpm_fini(struct mlxsw_sp *mlxsw_sp)
{
- kfree(mlxsw_sp->router.lpm.trees);
+ kfree(mlxsw_sp->router->lpm.trees);
}
static bool mlxsw_sp_vr_is_used(const struct mlxsw_sp_vr *vr)
@@ -527,7 +634,7 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
int i;
for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
- vr = &mlxsw_sp->router.vrs[i];
+ vr = &mlxsw_sp->router->vrs[i];
if (!mlxsw_sp_vr_is_used(vr))
return vr;
}
@@ -573,7 +680,7 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp,
tb_id = mlxsw_sp_fix_tb_id(tb_id);
for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
- vr = &mlxsw_sp->router.vrs[i];
+ vr = &mlxsw_sp->router->vrs[i];
if (mlxsw_sp_vr_is_used(vr) && vr->tb_id == tb_id)
return vr;
}
@@ -680,13 +787,13 @@ static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
return -EIO;
max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
- mlxsw_sp->router.vrs = kcalloc(max_vrs, sizeof(struct mlxsw_sp_vr),
- GFP_KERNEL);
- if (!mlxsw_sp->router.vrs)
+ mlxsw_sp->router->vrs = kcalloc(max_vrs, sizeof(struct mlxsw_sp_vr),
+ GFP_KERNEL);
+ if (!mlxsw_sp->router->vrs)
return -ENOMEM;
for (i = 0; i < max_vrs; i++) {
- vr = &mlxsw_sp->router.vrs[i];
+ vr = &mlxsw_sp->router->vrs[i];
vr->id = i;
}
@@ -706,7 +813,7 @@ static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp)
*/
mlxsw_core_flush_owq();
mlxsw_sp_router_fib_flush(mlxsw_sp);
- kfree(mlxsw_sp->router.vrs);
+ kfree(mlxsw_sp->router->vrs);
}
struct mlxsw_sp_neigh_key {
@@ -758,7 +865,7 @@ static int
mlxsw_sp_neigh_entry_insert(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_neigh_entry *neigh_entry)
{
- return rhashtable_insert_fast(&mlxsw_sp->router.neigh_ht,
+ return rhashtable_insert_fast(&mlxsw_sp->router->neigh_ht,
&neigh_entry->ht_node,
mlxsw_sp_neigh_ht_params);
}
@@ -767,7 +874,7 @@ static void
mlxsw_sp_neigh_entry_remove(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_neigh_entry *neigh_entry)
{
- rhashtable_remove_fast(&mlxsw_sp->router.neigh_ht,
+ rhashtable_remove_fast(&mlxsw_sp->router->neigh_ht,
&neigh_entry->ht_node,
mlxsw_sp_neigh_ht_params);
}
@@ -815,7 +922,7 @@ mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
struct mlxsw_sp_neigh_key key;
key.n = n;
- return rhashtable_lookup_fast(&mlxsw_sp->router.neigh_ht,
+ return rhashtable_lookup_fast(&mlxsw_sp->router->neigh_ht,
&key, mlxsw_sp_neigh_ht_params);
}
@@ -824,7 +931,7 @@ mlxsw_sp_router_neighs_update_interval_init(struct mlxsw_sp *mlxsw_sp)
{
unsigned long interval = NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME);
- mlxsw_sp->router.neighs_update.interval = jiffies_to_msecs(interval);
+ mlxsw_sp->router->neighs_update.interval = jiffies_to_msecs(interval);
}
static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp,
@@ -839,13 +946,13 @@ static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp,
mlxsw_reg_rauhtd_ent_ipv4_unpack(rauhtd_pl, ent_index, &rif, &dip);
- if (!mlxsw_sp->rifs[rif]) {
+ if (!mlxsw_sp->router->rifs[rif]) {
dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
return;
}
dipn = htonl(dip);
- dev = mlxsw_sp->rifs[rif]->dev;
+ dev = mlxsw_sp->router->rifs[rif]->dev;
n = neigh_lookup(&arp_tbl, &dipn, dev);
if (!n) {
netdev_err(dev, "Failed to find matching neighbour for IP=%pI4h\n",
@@ -954,7 +1061,7 @@ static void mlxsw_sp_router_neighs_update_nh(struct mlxsw_sp *mlxsw_sp)
/* Take the RTNL mutex here to prevent the lists from changing */
rtnl_lock();
- list_for_each_entry(neigh_entry, &mlxsw_sp->router.nexthop_neighs_list,
+ list_for_each_entry(neigh_entry, &mlxsw_sp->router->nexthop_neighs_list,
nexthop_neighs_list_node)
/* If this neigh has nexthops, make the kernel think this neigh
* is active regardless of the traffic.
@@ -966,33 +1073,35 @@ static void mlxsw_sp_router_neighs_update_nh(struct mlxsw_sp *mlxsw_sp)
static void
mlxsw_sp_router_neighs_update_work_schedule(struct mlxsw_sp *mlxsw_sp)
{
- unsigned long interval = mlxsw_sp->router.neighs_update.interval;
+ unsigned long interval = mlxsw_sp->router->neighs_update.interval;
- mlxsw_core_schedule_dw(&mlxsw_sp->router.neighs_update.dw,
+ mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw,
msecs_to_jiffies(interval));
}
static void mlxsw_sp_router_neighs_update_work(struct work_struct *work)
{
- struct mlxsw_sp *mlxsw_sp = container_of(work, struct mlxsw_sp,
- router.neighs_update.dw.work);
+ struct mlxsw_sp_router *router;
int err;
- err = mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp);
+ router = container_of(work, struct mlxsw_sp_router,
+ neighs_update.dw.work);
+ err = mlxsw_sp_router_neighs_update_rauhtd(router->mlxsw_sp);
if (err)
- dev_err(mlxsw_sp->bus_info->dev, "Could not update kernel for neigh activity");
+ dev_err(router->mlxsw_sp->bus_info->dev, "Could not update kernel for neigh activity");
- mlxsw_sp_router_neighs_update_nh(mlxsw_sp);
+ mlxsw_sp_router_neighs_update_nh(router->mlxsw_sp);
- mlxsw_sp_router_neighs_update_work_schedule(mlxsw_sp);
+ mlxsw_sp_router_neighs_update_work_schedule(router->mlxsw_sp);
}
static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct *work)
{
struct mlxsw_sp_neigh_entry *neigh_entry;
- struct mlxsw_sp *mlxsw_sp = container_of(work, struct mlxsw_sp,
- router.nexthop_probe_dw.work);
+ struct mlxsw_sp_router *router;
+ router = container_of(work, struct mlxsw_sp_router,
+ nexthop_probe_dw.work);
/* Iterate over nexthop neighbours, find those that are unresolved and
* send ARP for them. This solves the chicken-and-egg problem where
* the nexthop wouldn't get offloaded until the neighbour is resolved
@@ -1002,13 +1111,13 @@ static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct *work)
* Take the RTNL mutex here to prevent the lists from changing.
*/
rtnl_lock();
- list_for_each_entry(neigh_entry, &mlxsw_sp->router.nexthop_neighs_list,
+ list_for_each_entry(neigh_entry, &router->nexthop_neighs_list,
nexthop_neighs_list_node)
if (!neigh_entry->connected)
neigh_event_send(neigh_entry->key.n, NULL);
rtnl_unlock();
- mlxsw_core_schedule_dw(&mlxsw_sp->router.nexthop_probe_dw,
+ mlxsw_core_schedule_dw(&router->nexthop_probe_dw,
MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL);
}
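
Both delayed works above now recover their context with container_of() on struct mlxsw_sp_router rather than on struct mlxsw_sp, which is what lets the router state move behind a pointer. A userspace sketch of the container_of() pattern; structure names here are illustrative:

#include <stddef.h>
#include <stdio.h>

/* Recover the enclosing structure from a pointer to an embedded member. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work { int pending; };

struct router {
	int id;
	struct work neighs_update_work;
};

static void work_handler(struct work *w)
{
	struct router *router = container_of(w, struct router,
					     neighs_update_work);

	printf("router id=%d\n", router->id);
}

int main(void)
{
	struct router r = { .id = 7 };

	work_handler(&r.neighs_update_work);
	return 0;
}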
@@ -1130,7 +1239,7 @@ int mlxsw_sp_router_netevent_event(struct notifier_block *unused,
mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
interval = jiffies_to_msecs(NEIGH_VAR(p, DELAY_PROBE_TIME));
- mlxsw_sp->router.neighs_update.interval = interval;
+ mlxsw_sp->router->neighs_update.interval = interval;
mlxsw_sp_port_dev_put(mlxsw_sp_port);
break;
@@ -1171,7 +1280,7 @@ static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
{
int err;
- err = rhashtable_init(&mlxsw_sp->router.neigh_ht,
+ err = rhashtable_init(&mlxsw_sp->router->neigh_ht,
&mlxsw_sp_neigh_ht_params);
if (err)
return err;
@@ -1182,20 +1291,20 @@ static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
mlxsw_sp_router_neighs_update_interval_init(mlxsw_sp);
/* Create the delayed works for the activity_update */
- INIT_DELAYED_WORK(&mlxsw_sp->router.neighs_update.dw,
+ INIT_DELAYED_WORK(&mlxsw_sp->router->neighs_update.dw,
mlxsw_sp_router_neighs_update_work);
- INIT_DELAYED_WORK(&mlxsw_sp->router.nexthop_probe_dw,
+ INIT_DELAYED_WORK(&mlxsw_sp->router->nexthop_probe_dw,
mlxsw_sp_router_probe_unresolved_nexthops);
- mlxsw_core_schedule_dw(&mlxsw_sp->router.neighs_update.dw, 0);
- mlxsw_core_schedule_dw(&mlxsw_sp->router.nexthop_probe_dw, 0);
+ mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw, 0);
+ mlxsw_core_schedule_dw(&mlxsw_sp->router->nexthop_probe_dw, 0);
return 0;
}
static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
{
- cancel_delayed_work_sync(&mlxsw_sp->router.neighs_update.dw);
- cancel_delayed_work_sync(&mlxsw_sp->router.nexthop_probe_dw);
- rhashtable_destroy(&mlxsw_sp->router.neigh_ht);
+ cancel_delayed_work_sync(&mlxsw_sp->router->neighs_update.dw);
+ cancel_delayed_work_sync(&mlxsw_sp->router->nexthop_probe_dw);
+ rhashtable_destroy(&mlxsw_sp->router->neigh_ht);
}
static int mlxsw_sp_neigh_rif_flush(struct mlxsw_sp *mlxsw_sp,
@@ -1270,7 +1379,7 @@ static const struct rhashtable_params mlxsw_sp_nexthop_group_ht_params = {
static int mlxsw_sp_nexthop_group_insert(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop_group *nh_grp)
{
- return rhashtable_insert_fast(&mlxsw_sp->router.nexthop_group_ht,
+ return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_group_ht,
&nh_grp->ht_node,
mlxsw_sp_nexthop_group_ht_params);
}
@@ -1278,7 +1387,7 @@ static int mlxsw_sp_nexthop_group_insert(struct mlxsw_sp *mlxsw_sp,
static void mlxsw_sp_nexthop_group_remove(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop_group *nh_grp)
{
- rhashtable_remove_fast(&mlxsw_sp->router.nexthop_group_ht,
+ rhashtable_remove_fast(&mlxsw_sp->router->nexthop_group_ht,
&nh_grp->ht_node,
mlxsw_sp_nexthop_group_ht_params);
}
@@ -1287,7 +1396,7 @@ static struct mlxsw_sp_nexthop_group *
mlxsw_sp_nexthop_group_lookup(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop_group_key key)
{
- return rhashtable_lookup_fast(&mlxsw_sp->router.nexthop_group_ht, &key,
+ return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht, &key,
mlxsw_sp_nexthop_group_ht_params);
}
@@ -1300,14 +1409,14 @@ static const struct rhashtable_params mlxsw_sp_nexthop_ht_params = {
static int mlxsw_sp_nexthop_insert(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop *nh)
{
- return rhashtable_insert_fast(&mlxsw_sp->router.nexthop_ht,
+ return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_ht,
&nh->ht_node, mlxsw_sp_nexthop_ht_params);
}
static void mlxsw_sp_nexthop_remove(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop *nh)
{
- rhashtable_remove_fast(&mlxsw_sp->router.nexthop_ht, &nh->ht_node,
+ rhashtable_remove_fast(&mlxsw_sp->router->nexthop_ht, &nh->ht_node,
mlxsw_sp_nexthop_ht_params);
}
@@ -1315,7 +1424,7 @@ static struct mlxsw_sp_nexthop *
mlxsw_sp_nexthop_lookup(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop_key key)
{
- return rhashtable_lookup_fast(&mlxsw_sp->router.nexthop_ht, &key,
+ return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_ht, &key,
mlxsw_sp_nexthop_ht_params);
}
@@ -1602,7 +1711,7 @@ static int mlxsw_sp_nexthop_neigh_init(struct mlxsw_sp *mlxsw_sp,
*/
if (list_empty(&neigh_entry->nexthop_list))
list_add_tail(&neigh_entry->nexthop_neighs_list_node,
- &mlxsw_sp->router.nexthop_neighs_list);
+ &mlxsw_sp->router->nexthop_neighs_list);
nh->neigh_entry = neigh_entry;
list_add_tail(&nh->neigh_list_node, &neigh_entry->nexthop_list);
@@ -1700,7 +1809,7 @@ static void mlxsw_sp_nexthop_event(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop *nh;
struct mlxsw_sp_rif *rif;
- if (mlxsw_sp->router.aborted)
+ if (mlxsw_sp->router->aborted)
return;
key.fib_nh = fib_nh;
@@ -2513,7 +2622,7 @@ mlxsw_sp_router_fib4_add(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_node *fib_node;
int err;
- if (mlxsw_sp->router.aborted)
+ if (mlxsw_sp->router->aborted)
return 0;
fib_node = mlxsw_sp_fib4_node_get(mlxsw_sp, fen_info);
@@ -2553,7 +2662,7 @@ static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_entry *fib_entry;
struct mlxsw_sp_fib_node *fib_node;
- if (mlxsw_sp->router.aborted)
+ if (mlxsw_sp->router->aborted)
return;
fib_entry = mlxsw_sp_fib4_entry_lookup(mlxsw_sp, fen_info);
@@ -2584,7 +2693,7 @@ static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
return err;
for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
- struct mlxsw_sp_vr *vr = &mlxsw_sp->router.vrs[i];
+ struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
char raltb_pl[MLXSW_REG_RALTB_LEN];
char ralue_pl[MLXSW_REG_RALUE_LEN];
@@ -2666,7 +2775,7 @@ static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
int i;
for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
- struct mlxsw_sp_vr *vr = &mlxsw_sp->router.vrs[i];
+ struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
if (!mlxsw_sp_vr_is_used(vr))
continue;
@@ -2678,11 +2787,11 @@ static void mlxsw_sp_router_fib4_abort(struct mlxsw_sp *mlxsw_sp)
{
int err;
- if (mlxsw_sp->router.aborted)
+ if (mlxsw_sp->router->aborted)
return;
dev_warn(mlxsw_sp->bus_info->dev, "FIB abort triggered. Note that FIB entries are no longer being offloaded to this device.\n");
mlxsw_sp_router_fib_flush(mlxsw_sp);
- mlxsw_sp->router.aborted = true;
+ mlxsw_sp->router->aborted = true;
err = mlxsw_sp_router_set_abort_trap(mlxsw_sp);
if (err)
dev_warn(mlxsw_sp->bus_info->dev, "Failed to set abort trap.\n");
@@ -2748,9 +2857,9 @@ static void mlxsw_sp_router_fib_event_work(struct work_struct *work)
static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
unsigned long event, void *ptr)
{
- struct mlxsw_sp *mlxsw_sp = container_of(nb, struct mlxsw_sp, fib_nb);
struct mlxsw_sp_fib_event_work *fib_work;
struct fib_notifier_info *info = ptr;
+ struct mlxsw_sp_router *router;
if (!net_eq(info->net, &init_net))
return NOTIFY_DONE;
@@ -2760,7 +2869,8 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
return NOTIFY_BAD;
INIT_WORK(&fib_work->work, mlxsw_sp_router_fib_event_work);
- fib_work->mlxsw_sp = mlxsw_sp;
+ router = container_of(nb, struct mlxsw_sp_router, fib_nb);
+ fib_work->mlxsw_sp = router->mlxsw_sp;
fib_work->event = event;
switch (event) {
@@ -2798,8 +2908,9 @@ mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
int i;
for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
- if (mlxsw_sp->rifs[i] && mlxsw_sp->rifs[i]->dev == dev)
- return mlxsw_sp->rifs[i];
+ if (mlxsw_sp->router->rifs[i] &&
+ mlxsw_sp->router->rifs[i]->dev == dev)
+ return mlxsw_sp->router->rifs[i];
return NULL;
}
@@ -2849,77 +2960,46 @@ static bool mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *rif,
return false;
}
-#define MLXSW_SP_INVALID_INDEX_RIF 0xffff
-static int mlxsw_sp_avail_rif_get(struct mlxsw_sp *mlxsw_sp)
-{
- int i;
-
- for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
- if (!mlxsw_sp->rifs[i])
- return i;
-
- return MLXSW_SP_INVALID_INDEX_RIF;
-}
-
-static void mlxsw_sp_vport_rif_sp_attr_get(struct mlxsw_sp_port *mlxsw_sp_vport,
- bool *p_lagged, u16 *p_system_port)
+static enum mlxsw_sp_rif_type
+mlxsw_sp_dev_rif_type(const struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *dev)
{
- u8 local_port = mlxsw_sp_vport->local_port;
-
- *p_lagged = mlxsw_sp_vport->lagged;
- *p_system_port = *p_lagged ? mlxsw_sp_vport->lag_id : local_port;
-}
-
-static int mlxsw_sp_vport_rif_sp_op(struct mlxsw_sp_port *mlxsw_sp_vport,
- u16 vr_id, struct net_device *l3_dev,
- u16 rif_index, bool create)
-{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
- bool lagged = mlxsw_sp_vport->lagged;
- char ritr_pl[MLXSW_REG_RITR_LEN];
- u16 system_port;
-
- mlxsw_reg_ritr_pack(ritr_pl, create, MLXSW_REG_RITR_SP_IF, rif_index,
- vr_id, l3_dev->mtu, l3_dev->dev_addr);
+ enum mlxsw_sp_fid_type type;
- mlxsw_sp_vport_rif_sp_attr_get(mlxsw_sp_vport, &lagged, &system_port);
- mlxsw_reg_ritr_sp_if_pack(ritr_pl, lagged, system_port,
- mlxsw_sp_vport_vid_get(mlxsw_sp_vport));
-
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
-}
-
-static void mlxsw_sp_vport_rif_sp_leave(struct mlxsw_sp_port *mlxsw_sp_vport);
+ /* RIF type is derived from the type of the underlying FID */
+ if (is_vlan_dev(dev) && netif_is_bridge_master(vlan_dev_real_dev(dev)))
+ type = MLXSW_SP_FID_TYPE_8021Q;
+ else if (netif_is_bridge_master(dev) && br_vlan_enabled(dev))
+ type = MLXSW_SP_FID_TYPE_8021Q;
+ else if (netif_is_bridge_master(dev))
+ type = MLXSW_SP_FID_TYPE_8021D;
+ else
+ type = MLXSW_SP_FID_TYPE_RFID;
-static u16 mlxsw_sp_rif_sp_to_fid(u16 rif_index)
-{
- return MLXSW_SP_RFID_BASE + rif_index;
+ return mlxsw_sp_fid_type_rif_type(mlxsw_sp, type);
}
-static struct mlxsw_sp_fid *
-mlxsw_sp_rfid_alloc(u16 fid, struct net_device *l3_dev)
+static int mlxsw_sp_rif_index_alloc(struct mlxsw_sp *mlxsw_sp, u16 *p_rif_index)
{
- struct mlxsw_sp_fid *f;
-
- f = kzalloc(sizeof(*f), GFP_KERNEL);
- if (!f)
- return NULL;
+ int i;
- f->leave = mlxsw_sp_vport_rif_sp_leave;
- f->ref_count = 0;
- f->dev = l3_dev;
- f->fid = fid;
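+ /* Find the first free entry in the RIF array */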
+ for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
+ if (!mlxsw_sp->router->rifs[i]) {
+ *p_rif_index = i;
+ return 0;
+ }
+ }
- return f;
+ return -ENOBUFS;
}
-static struct mlxsw_sp_rif *
-mlxsw_sp_rif_alloc(u16 rif_index, u16 vr_id, struct net_device *l3_dev,
- struct mlxsw_sp_fid *f)
+static struct mlxsw_sp_rif *mlxsw_sp_rif_alloc(size_t rif_size, u16 rif_index,
+ u16 vr_id,
+ struct net_device *l3_dev)
{
struct mlxsw_sp_rif *rif;
- rif = kzalloc(sizeof(*rif), GFP_KERNEL);
+ rif = kzalloc(rif_size, GFP_KERNEL);
if (!rif)
return NULL;
@@ -2930,11 +3010,16 @@ mlxsw_sp_rif_alloc(u16 rif_index, u16 vr_id, struct net_device *l3_dev,
rif->vr_id = vr_id;
rif->dev = l3_dev;
rif->rif_index = rif_index;
- rif->f = f;
return rif;
}
+struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp,
+ u16 rif_index)
+{
+ return mlxsw_sp->router->rifs[rif_index];
+}
+
u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif)
{
return rif->rif_index;
@@ -2946,152 +3031,199 @@ int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif)
}
static struct mlxsw_sp_rif *
-mlxsw_sp_vport_rif_sp_create(struct mlxsw_sp_port *mlxsw_sp_vport,
- struct net_device *l3_dev)
+mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_rif_params *params)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
- u32 tb_id = l3mdev_fib_table(l3_dev);
- struct mlxsw_sp_vr *vr;
- struct mlxsw_sp_fid *f;
+ u32 tb_id = l3mdev_fib_table(params->dev);
+ const struct mlxsw_sp_rif_ops *ops;
+ enum mlxsw_sp_rif_type type;
struct mlxsw_sp_rif *rif;
- u16 fid, rif_index;
+ struct mlxsw_sp_fid *fid;
+ struct mlxsw_sp_vr *vr;
+ u16 rif_index;
int err;
- rif_index = mlxsw_sp_avail_rif_get(mlxsw_sp);
- if (rif_index == MLXSW_SP_INVALID_INDEX_RIF)
- return ERR_PTR(-ERANGE);
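+ /* The netdev type determines the RIF type, which selects the ops */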
+ type = mlxsw_sp_dev_rif_type(mlxsw_sp, params->dev);
+ ops = mlxsw_sp->router->rif_ops_arr[type];
vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN);
if (IS_ERR(vr))
return ERR_CAST(vr);
- err = mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, vr->id, l3_dev,
- rif_index, true);
- if (err)
- goto err_vport_rif_sp_op;
-
- fid = mlxsw_sp_rif_sp_to_fid(rif_index);
- err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, true);
+ err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index);
if (err)
- goto err_rif_fdb_op;
-
- f = mlxsw_sp_rfid_alloc(fid, l3_dev);
- if (!f) {
- err = -ENOMEM;
- goto err_rfid_alloc;
- }
+ goto err_rif_index_alloc;
- rif = mlxsw_sp_rif_alloc(rif_index, vr->id, l3_dev, f);
+ rif = mlxsw_sp_rif_alloc(ops->rif_size, rif_index, vr->id, params->dev);
if (!rif) {
err = -ENOMEM;
goto err_rif_alloc;
}
+ rif->mlxsw_sp = mlxsw_sp;
+ rif->ops = ops;
- if (devlink_dpipe_table_counter_enabled(priv_to_devlink(mlxsw_sp->core),
- MLXSW_SP_DPIPE_TABLE_NAME_ERIF)) {
- err = mlxsw_sp_rif_counter_alloc(mlxsw_sp, rif,
- MLXSW_SP_RIF_COUNTER_EGRESS);
- if (err)
- netdev_dbg(mlxsw_sp_vport->dev,
- "Counter alloc Failed err=%d\n", err);
+ fid = ops->fid_get(rif);
+ if (IS_ERR(fid)) {
+ err = PTR_ERR(fid);
+ goto err_fid_get;
}
+ rif->fid = fid;
+
+ if (ops->setup)
+ ops->setup(rif, params);
+
+ err = ops->configure(rif);
+ if (err)
+ goto err_configure;
+
+ err = mlxsw_sp_rif_fdb_op(mlxsw_sp, params->dev->dev_addr,
+ mlxsw_sp_fid_index(fid), true);
+ if (err)
+ goto err_rif_fdb_op;
- f->rif = rif;
- mlxsw_sp->rifs[rif_index] = rif;
+ mlxsw_sp_rif_counters_alloc(rif);
+ mlxsw_sp_fid_rif_set(fid, rif);
+ mlxsw_sp->router->rifs[rif_index] = rif;
vr->rif_count++;
return rif;
-err_rif_alloc:
- kfree(f);
-err_rfid_alloc:
- mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, false);
err_rif_fdb_op:
- mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, vr->id, l3_dev, rif_index,
- false);
-err_vport_rif_sp_op:
+ ops->deconfigure(rif);
+err_configure:
+ mlxsw_sp_fid_put(fid);
+err_fid_get:
+ kfree(rif);
+err_rif_alloc:
+err_rif_index_alloc:
mlxsw_sp_vr_put(vr);
return ERR_PTR(err);
}
-static void mlxsw_sp_vport_rif_sp_destroy(struct mlxsw_sp_port *mlxsw_sp_vport,
- struct mlxsw_sp_rif *rif)
+void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
- struct mlxsw_sp_vr *vr = &mlxsw_sp->router.vrs[rif->vr_id];
- struct net_device *l3_dev = rif->dev;
- struct mlxsw_sp_fid *f = rif->f;
- u16 rif_index = rif->rif_index;
- u16 fid = f->fid;
+ const struct mlxsw_sp_rif_ops *ops = rif->ops;
+ struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
+ struct mlxsw_sp_fid *fid = rif->fid;
+ struct mlxsw_sp_vr *vr;
mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);
-
- mlxsw_sp_rif_counter_free(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
- mlxsw_sp_rif_counter_free(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_INGRESS);
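+ /* Tear down in reverse order of mlxsw_sp_rif_create() */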
+ vr = &mlxsw_sp->router->vrs[rif->vr_id];
vr->rif_count--;
- mlxsw_sp->rifs[rif_index] = NULL;
- f->rif = NULL;
-
+ mlxsw_sp->router->rifs[rif->rif_index] = NULL;
+ mlxsw_sp_fid_rif_set(fid, NULL);
+ mlxsw_sp_rif_counters_free(rif);
+ mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->dev->dev_addr,
+ mlxsw_sp_fid_index(fid), false);
+ ops->deconfigure(rif);
+ mlxsw_sp_fid_put(fid);
kfree(rif);
+ mlxsw_sp_vr_put(vr);
+}
- kfree(f);
-
- mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, false);
+static void
+mlxsw_sp_rif_subport_params_init(struct mlxsw_sp_rif_params *params,
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
- mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, vr->id, l3_dev, rif_index,
- false);
- mlxsw_sp_vr_put(vr);
+ params->vid = mlxsw_sp_port_vlan->vid;
+ params->lag = mlxsw_sp_port->lagged;
+ if (params->lag)
+ params->lag_id = mlxsw_sp_port->lag_id;
+ else
+ params->system_port = mlxsw_sp_port->local_port;
}
-static int mlxsw_sp_vport_rif_sp_join(struct mlxsw_sp_port *mlxsw_sp_vport,
- struct net_device *l3_dev)
+static int
+mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
+ struct net_device *l3_dev)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
+ struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u16 vid = mlxsw_sp_port_vlan->vid;
struct mlxsw_sp_rif *rif;
+ struct mlxsw_sp_fid *fid;
+ int err;
rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
if (!rif) {
- rif = mlxsw_sp_vport_rif_sp_create(mlxsw_sp_vport, l3_dev);
+ struct mlxsw_sp_rif_params params = {
+ .dev = l3_dev,
+ };
+
+ mlxsw_sp_rif_subport_params_init(&params, mlxsw_sp_port_vlan);
+ rif = mlxsw_sp_rif_create(mlxsw_sp, &params);
if (IS_ERR(rif))
return PTR_ERR(rif);
}
- mlxsw_sp_vport_fid_set(mlxsw_sp_vport, rif->f);
- rif->f->ref_count++;
+ /* FID was already created, just take a reference */
+ fid = rif->ops->fid_get(rif);
+ err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid);
+ if (err)
+ goto err_fid_port_vid_map;
+
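+ /* Disable learning and set the VID to forwarding on the router port */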
+ err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
+ if (err)
+ goto err_port_vid_learning_set;
+
+ err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid,
+ BR_STATE_FORWARDING);
+ if (err)
+ goto err_port_vid_stp_set;
- netdev_dbg(mlxsw_sp_vport->dev, "Joined FID=%d\n", rif->f->fid);
+ mlxsw_sp_port_vlan->fid = fid;
return 0;
+
+err_port_vid_stp_set:
+ mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
+err_port_vid_learning_set:
+ mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
+err_fid_port_vid_map:
+ mlxsw_sp_fid_put(fid);
+ return err;
}
-static void mlxsw_sp_vport_rif_sp_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
+void
+mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
- struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
+ struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
+ struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
+ u16 vid = mlxsw_sp_port_vlan->vid;
- netdev_dbg(mlxsw_sp_vport->dev, "Left FID=%d\n", f->fid);
+ if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_RFID))
+ return;
- mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL);
- if (--f->ref_count == 0)
- mlxsw_sp_vport_rif_sp_destroy(mlxsw_sp_vport, f->rif);
+ mlxsw_sp_port_vlan->fid = NULL;
+ mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_BLOCKING);
+ mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
+ mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
+ /* If the router port holds the last reference on the rFID, then the
+ * associated Sub-port RIF will be destroyed.
+ */

+ mlxsw_sp_fid_put(fid);
}
-static int mlxsw_sp_inetaddr_vport_event(struct net_device *l3_dev,
- struct net_device *port_dev,
- unsigned long event, u16 vid)
+static int mlxsw_sp_inetaddr_port_vlan_event(struct net_device *l3_dev,
+ struct net_device *port_dev,
+ unsigned long event, u16 vid)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev);
- struct mlxsw_sp_port *mlxsw_sp_vport;
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
- mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
- if (WARN_ON(!mlxsw_sp_vport))
+ mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
+ if (WARN_ON(!mlxsw_sp_port_vlan))
return -EINVAL;
switch (event) {
case NETDEV_UP:
- return mlxsw_sp_vport_rif_sp_join(mlxsw_sp_vport, l3_dev);
+ return mlxsw_sp_port_vlan_router_join(mlxsw_sp_port_vlan,
+ l3_dev);
case NETDEV_DOWN:
- mlxsw_sp_vport_rif_sp_leave(mlxsw_sp_vport);
+ mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
break;
}
@@ -3106,7 +3238,7 @@ static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
netif_is_ovs_port(port_dev))
return 0;
- return mlxsw_sp_inetaddr_vport_event(port_dev, port_dev, event, 1);
+ return mlxsw_sp_inetaddr_port_vlan_event(port_dev, port_dev, event, 1);
}
static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
@@ -3119,8 +3251,9 @@ static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
netdev_for_each_lower_dev(lag_dev, port_dev, iter) {
if (mlxsw_sp_port_dev_check(port_dev)) {
- err = mlxsw_sp_inetaddr_vport_event(l3_dev, port_dev,
- event, vid);
+ err = mlxsw_sp_inetaddr_port_vlan_event(l3_dev,
+ port_dev,
+ event, vid);
if (err)
return err;
}
@@ -3138,189 +3271,24 @@ static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event, 1);
}
-static struct mlxsw_sp_fid *mlxsw_sp_bridge_fid_get(struct mlxsw_sp *mlxsw_sp,
- struct net_device *l3_dev)
-{
- u16 fid;
-
- if (is_vlan_dev(l3_dev))
- fid = vlan_dev_vlan_id(l3_dev);
- else if (mlxsw_sp->master_bridge.dev == l3_dev)
- fid = 1;
- else
- return mlxsw_sp_vfid_find(mlxsw_sp, l3_dev);
-
- return mlxsw_sp_fid_find(mlxsw_sp, fid);
-}
-
-static u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp)
-{
- return mlxsw_core_max_ports(mlxsw_sp->core) + 1;
-}
-
-static enum mlxsw_flood_table_type mlxsw_sp_flood_table_type_get(u16 fid)
-{
- return mlxsw_sp_fid_is_vfid(fid) ? MLXSW_REG_SFGC_TABLE_TYPE_FID :
- MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
-}
-
-static u16 mlxsw_sp_flood_table_index_get(u16 fid)
-{
- return mlxsw_sp_fid_is_vfid(fid) ? mlxsw_sp_fid_to_vfid(fid) : fid;
-}
-
-static int mlxsw_sp_router_port_flood_set(struct mlxsw_sp *mlxsw_sp, u16 fid,
- bool set)
-{
- u8 router_port = mlxsw_sp_router_port(mlxsw_sp);
- enum mlxsw_flood_table_type table_type;
- char *sftr_pl;
- u16 index;
- int err;
-
- sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
- if (!sftr_pl)
- return -ENOMEM;
-
- table_type = mlxsw_sp_flood_table_type_get(fid);
- index = mlxsw_sp_flood_table_index_get(fid);
- mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BC, index, table_type,
- 1, router_port, set);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
-
- kfree(sftr_pl);
- return err;
-}
-
-static enum mlxsw_reg_ritr_if_type mlxsw_sp_rif_type_get(u16 fid)
-{
- if (mlxsw_sp_fid_is_vfid(fid))
- return MLXSW_REG_RITR_FID_IF;
- else
- return MLXSW_REG_RITR_VLAN_IF;
-}
-
-static int mlxsw_sp_rif_bridge_op(struct mlxsw_sp *mlxsw_sp, u16 vr_id,
- struct net_device *l3_dev,
- u16 fid, u16 rif,
- bool create)
-{
- enum mlxsw_reg_ritr_if_type rif_type;
- char ritr_pl[MLXSW_REG_RITR_LEN];
-
- rif_type = mlxsw_sp_rif_type_get(fid);
- mlxsw_reg_ritr_pack(ritr_pl, create, rif_type, rif, vr_id, l3_dev->mtu,
- l3_dev->dev_addr);
- mlxsw_reg_ritr_fid_set(ritr_pl, rif_type, fid);
-
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
-}
-
-static int mlxsw_sp_rif_bridge_create(struct mlxsw_sp *mlxsw_sp,
- struct net_device *l3_dev,
- struct mlxsw_sp_fid *f)
-{
- u32 tb_id = l3mdev_fib_table(l3_dev);
- struct mlxsw_sp_rif *rif;
- struct mlxsw_sp_vr *vr;
- u16 rif_index;
- int err;
-
- rif_index = mlxsw_sp_avail_rif_get(mlxsw_sp);
- if (rif_index == MLXSW_SP_INVALID_INDEX_RIF)
- return -ERANGE;
-
- vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN);
- if (IS_ERR(vr))
- return PTR_ERR(vr);
-
- err = mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, true);
- if (err)
- goto err_port_flood_set;
-
- err = mlxsw_sp_rif_bridge_op(mlxsw_sp, vr->id, l3_dev, f->fid,
- rif_index, true);
- if (err)
- goto err_rif_bridge_op;
-
- err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, true);
- if (err)
- goto err_rif_fdb_op;
-
- rif = mlxsw_sp_rif_alloc(rif_index, vr->id, l3_dev, f);
- if (!rif) {
- err = -ENOMEM;
- goto err_rif_alloc;
- }
-
- f->rif = rif;
- mlxsw_sp->rifs[rif_index] = rif;
- vr->rif_count++;
-
- netdev_dbg(l3_dev, "RIF=%d created\n", rif_index);
-
- return 0;
-
-err_rif_alloc:
- mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false);
-err_rif_fdb_op:
- mlxsw_sp_rif_bridge_op(mlxsw_sp, vr->id, l3_dev, f->fid, rif_index,
- false);
-err_rif_bridge_op:
- mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, false);
-err_port_flood_set:
- mlxsw_sp_vr_put(vr);
- return err;
-}
-
-void mlxsw_sp_rif_bridge_destroy(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_rif *rif)
-{
- struct mlxsw_sp_vr *vr = &mlxsw_sp->router.vrs[rif->vr_id];
- struct net_device *l3_dev = rif->dev;
- struct mlxsw_sp_fid *f = rif->f;
- u16 rif_index = rif->rif_index;
-
- mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);
-
- vr->rif_count--;
- mlxsw_sp->rifs[rif_index] = NULL;
- f->rif = NULL;
-
- kfree(rif);
-
- mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false);
-
- mlxsw_sp_rif_bridge_op(mlxsw_sp, vr->id, l3_dev, f->fid, rif_index,
- false);
-
- mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, false);
-
- mlxsw_sp_vr_put(vr);
-
- netdev_dbg(l3_dev, "RIF=%d destroyed\n", rif_index);
-}
-
static int mlxsw_sp_inetaddr_bridge_event(struct net_device *l3_dev,
- struct net_device *br_dev,
unsigned long event)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
- struct mlxsw_sp_fid *f;
-
- /* FID can either be an actual FID if the L3 device is the
- * VLAN-aware bridge or a VLAN device on top. Otherwise, the
- * L3 device is a VLAN-unaware bridge and we get a vFID.
- */
- f = mlxsw_sp_bridge_fid_get(mlxsw_sp, l3_dev);
- if (WARN_ON(!f))
- return -EINVAL;
+ struct mlxsw_sp_rif_params params = {
+ .dev = l3_dev,
+ };
+ struct mlxsw_sp_rif *rif;
switch (event) {
case NETDEV_UP:
- return mlxsw_sp_rif_bridge_create(mlxsw_sp, l3_dev, f);
+ rif = mlxsw_sp_rif_create(mlxsw_sp, &params);
+ if (IS_ERR(rif))
+ return PTR_ERR(rif);
+ break;
case NETDEV_DOWN:
- mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->rif);
+ rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
+ mlxsw_sp_rif_destroy(rif);
break;
}
@@ -3331,22 +3299,19 @@ static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev,
unsigned long event)
{
struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev);
u16 vid = vlan_dev_vlan_id(vlan_dev);
if (netif_is_bridge_port(vlan_dev))
return 0;
if (mlxsw_sp_port_dev_check(real_dev))
- return mlxsw_sp_inetaddr_vport_event(vlan_dev, real_dev, event,
- vid);
+ return mlxsw_sp_inetaddr_port_vlan_event(vlan_dev, real_dev,
+ event, vid);
else if (netif_is_lag_master(real_dev))
return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
vid);
- else if (netif_is_bridge_master(real_dev) &&
- mlxsw_sp->master_bridge.dev == real_dev)
- return mlxsw_sp_inetaddr_bridge_event(vlan_dev, real_dev,
- event);
+ else if (netif_is_bridge_master(real_dev) && br_vlan_enabled(real_dev))
+ return mlxsw_sp_inetaddr_bridge_event(vlan_dev, event);
return 0;
}
@@ -3359,7 +3324,7 @@ static int __mlxsw_sp_inetaddr_event(struct net_device *dev,
else if (netif_is_lag_master(dev))
return mlxsw_sp_inetaddr_lag_event(dev, event);
else if (netif_is_bridge_master(dev))
- return mlxsw_sp_inetaddr_bridge_event(dev, dev, event);
+ return mlxsw_sp_inetaddr_bridge_event(dev, event);
else if (is_vlan_dev(dev))
return mlxsw_sp_inetaddr_vlan_event(dev, event);
else
@@ -3409,6 +3374,7 @@ int mlxsw_sp_netdevice_router_port_event(struct net_device *dev)
{
struct mlxsw_sp *mlxsw_sp;
struct mlxsw_sp_rif *rif;
+ u16 fid_index;
int err;
mlxsw_sp = mlxsw_sp_lower_get(dev);
@@ -3418,8 +3384,9 @@ int mlxsw_sp_netdevice_router_port_event(struct net_device *dev)
rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
if (!rif)
return 0;
+ fid_index = mlxsw_sp_fid_index(rif->fid);
- err = mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, rif->f->fid, false);
+ err = mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, false);
if (err)
return err;
@@ -3428,7 +3395,7 @@ int mlxsw_sp_netdevice_router_port_event(struct net_device *dev)
if (err)
goto err_rif_edit;
- err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, rif->f->fid, true);
+ err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, fid_index, true);
if (err)
goto err_rif_fdb_op;
@@ -3442,7 +3409,7 @@ int mlxsw_sp_netdevice_router_port_event(struct net_device *dev)
err_rif_fdb_op:
mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, rif->addr, rif->mtu);
err_rif_edit:
- mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, rif->f->fid, true);
+ mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, true);
return err;
}
@@ -3495,16 +3462,225 @@ int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
return err;
}
+static struct mlxsw_sp_rif_subport *
+mlxsw_sp_rif_subport_rif(const struct mlxsw_sp_rif *rif)
+{
+ return container_of(rif, struct mlxsw_sp_rif_subport, common);
+}
+
+static void mlxsw_sp_rif_subport_setup(struct mlxsw_sp_rif *rif,
+ const struct mlxsw_sp_rif_params *params)
+{
+ struct mlxsw_sp_rif_subport *rif_subport;
+
+ rif_subport = mlxsw_sp_rif_subport_rif(rif);
+ rif_subport->vid = params->vid;
+ rif_subport->lag = params->lag;
+ if (params->lag)
+ rif_subport->lag_id = params->lag_id;
+ else
+ rif_subport->system_port = params->system_port;
+}
+
+static int mlxsw_sp_rif_subport_op(struct mlxsw_sp_rif *rif, bool enable)
+{
+ struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
+ struct mlxsw_sp_rif_subport *rif_subport;
+ char ritr_pl[MLXSW_REG_RITR_LEN];
+
+ rif_subport = mlxsw_sp_rif_subport_rif(rif);
+ mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_SP_IF,
+ rif->rif_index, rif->vr_id, rif->dev->mtu,
+ rif->dev->dev_addr);
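+ /* A sub-port RIF is bound to either a LAG or a local port, plus a VID */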
+ mlxsw_reg_ritr_sp_if_pack(ritr_pl, rif_subport->lag,
+ rif_subport->lag ? rif_subport->lag_id :
+ rif_subport->system_port,
+ rif_subport->vid);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
+}
+
+static int mlxsw_sp_rif_subport_configure(struct mlxsw_sp_rif *rif)
+{
+ return mlxsw_sp_rif_subport_op(rif, true);
+}
+
+static void mlxsw_sp_rif_subport_deconfigure(struct mlxsw_sp_rif *rif)
+{
+ mlxsw_sp_rif_subport_op(rif, false);
+}
+
+static struct mlxsw_sp_fid *
+mlxsw_sp_rif_subport_fid_get(struct mlxsw_sp_rif *rif)
+{
+ return mlxsw_sp_fid_rfid_get(rif->mlxsw_sp, rif->rif_index);
+}
+
+static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_subport_ops = {
+ .type = MLXSW_SP_RIF_TYPE_SUBPORT,
+ .rif_size = sizeof(struct mlxsw_sp_rif_subport),
+ .setup = mlxsw_sp_rif_subport_setup,
+ .configure = mlxsw_sp_rif_subport_configure,
+ .deconfigure = mlxsw_sp_rif_subport_deconfigure,
+ .fid_get = mlxsw_sp_rif_subport_fid_get,
+};
+
+static int mlxsw_sp_rif_vlan_fid_op(struct mlxsw_sp_rif *rif,
+ enum mlxsw_reg_ritr_if_type type,
+ u16 vid_fid, bool enable)
+{
+ struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
+ char ritr_pl[MLXSW_REG_RITR_LEN];
+
+ mlxsw_reg_ritr_pack(ritr_pl, enable, type, rif->rif_index, rif->vr_id,
+ rif->dev->mtu, rif->dev->dev_addr);
+ mlxsw_reg_ritr_fid_set(ritr_pl, type, vid_fid);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
+}
+
+static u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp)
+{
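+ /* The router port is a virtual port one above the last front-panel port */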
+ return mlxsw_core_max_ports(mlxsw_sp->core) + 1;
+}
+
+static int mlxsw_sp_rif_vlan_configure(struct mlxsw_sp_rif *rif)
+{
+ struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
+ u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
+ int err;
+
+ err = mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, true);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
+ mlxsw_sp_router_port(mlxsw_sp), true);
+ if (err)
+ goto err_fid_bc_flood_set;
+
+ return 0;
+
+err_fid_bc_flood_set:
+ mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, false);
+ return err;
+}
+
+static void mlxsw_sp_rif_vlan_deconfigure(struct mlxsw_sp_rif *rif)
+{
+ struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
+ u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
+
+ mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
+ mlxsw_sp_router_port(mlxsw_sp), false);
+ mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, false);
+}
+
+static struct mlxsw_sp_fid *
+mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif)
+{
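+ /* A RIF on top of the VLAN-aware bridge itself uses the default VID 1 */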
+ u16 vid = is_vlan_dev(rif->dev) ? vlan_dev_vlan_id(rif->dev) : 1;
+
+ return mlxsw_sp_fid_8021q_get(rif->mlxsw_sp, vid);
+}
+
+static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_vlan_ops = {
+ .type = MLXSW_SP_RIF_TYPE_VLAN,
+ .rif_size = sizeof(struct mlxsw_sp_rif),
+ .configure = mlxsw_sp_rif_vlan_configure,
+ .deconfigure = mlxsw_sp_rif_vlan_deconfigure,
+ .fid_get = mlxsw_sp_rif_vlan_fid_get,
+};
+
+static int mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif)
+{
+ struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
+ u16 fid_index = mlxsw_sp_fid_index(rif->fid);
+ int err;
+
+ err = mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index,
+ true);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
+ mlxsw_sp_router_port(mlxsw_sp), true);
+ if (err)
+ goto err_fid_bc_flood_set;
+
+ return 0;
+
+err_fid_bc_flood_set:
+ mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
+ return err;
+}
+
+static void mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif)
+{
+ struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
+ u16 fid_index = mlxsw_sp_fid_index(rif->fid);
+
+ mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
+ mlxsw_sp_router_port(mlxsw_sp), false);
+ mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
+}
+
+static struct mlxsw_sp_fid *
+mlxsw_sp_rif_fid_fid_get(struct mlxsw_sp_rif *rif)
+{
+ return mlxsw_sp_fid_8021d_get(rif->mlxsw_sp, rif->dev->ifindex);
+}
+
+static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_fid_ops = {
+ .type = MLXSW_SP_RIF_TYPE_FID,
+ .rif_size = sizeof(struct mlxsw_sp_rif),
+ .configure = mlxsw_sp_rif_fid_configure,
+ .deconfigure = mlxsw_sp_rif_fid_deconfigure,
+ .fid_get = mlxsw_sp_rif_fid_fid_get,
+};
+
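+/* Per-type RIF operations, indexed by enum mlxsw_sp_rif_type */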
+static const struct mlxsw_sp_rif_ops *mlxsw_sp_rif_ops_arr[] = {
+ [MLXSW_SP_RIF_TYPE_SUBPORT] = &mlxsw_sp_rif_subport_ops,
+ [MLXSW_SP_RIF_TYPE_VLAN] = &mlxsw_sp_rif_vlan_ops,
+ [MLXSW_SP_RIF_TYPE_FID] = &mlxsw_sp_rif_fid_ops,
+};
+
+static int mlxsw_sp_rifs_init(struct mlxsw_sp *mlxsw_sp)
+{
+ u64 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
+
+ mlxsw_sp->router->rifs = kcalloc(max_rifs,
+ sizeof(struct mlxsw_sp_rif *),
+ GFP_KERNEL);
+ if (!mlxsw_sp->router->rifs)
+ return -ENOMEM;
+
+ mlxsw_sp->router->rif_ops_arr = mlxsw_sp_rif_ops_arr;
+
+ return 0;
+}
+
+static void mlxsw_sp_rifs_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ int i;
+
+ for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
+ WARN_ON_ONCE(mlxsw_sp->router->rifs[i]);
+
+ kfree(mlxsw_sp->router->rifs);
+}
+
static void mlxsw_sp_router_fib_dump_flush(struct notifier_block *nb)
{
- struct mlxsw_sp *mlxsw_sp = container_of(nb, struct mlxsw_sp, fib_nb);
+ struct mlxsw_sp_router *router;
/* Flush pending FIB notifications and then flush the device's
* table before requesting another dump. The FIB notification
* block is unregistered, so no need to take RTNL.
*/
mlxsw_core_flush_owq();
- mlxsw_sp_router_fib_flush(mlxsw_sp);
+ router = container_of(nb, struct mlxsw_sp_router, fib_nb);
+ mlxsw_sp_router_fib_flush(router->mlxsw_sp);
}
static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
@@ -3515,55 +3691,50 @@ static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS))
return -EIO;
-
max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
- mlxsw_sp->rifs = kcalloc(max_rifs, sizeof(struct mlxsw_sp_rif *),
- GFP_KERNEL);
- if (!mlxsw_sp->rifs)
- return -ENOMEM;
mlxsw_reg_rgcr_pack(rgcr_pl, true);
mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
if (err)
- goto err_rgcr_fail;
-
+ return err;
return 0;
-
-err_rgcr_fail:
- kfree(mlxsw_sp->rifs);
- return err;
}
static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
{
char rgcr_pl[MLXSW_REG_RGCR_LEN];
- int i;
mlxsw_reg_rgcr_pack(rgcr_pl, false);
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
-
- for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
- WARN_ON_ONCE(mlxsw_sp->rifs[i]);
-
- kfree(mlxsw_sp->rifs);
}
int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
{
+ struct mlxsw_sp_router *router;
int err;
- INIT_LIST_HEAD(&mlxsw_sp->router.nexthop_neighs_list);
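+ /* All router state is kept in its own dynamically allocated struct */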
+ router = kzalloc(sizeof(*mlxsw_sp->router), GFP_KERNEL);
+ if (!router)
+ return -ENOMEM;
+ mlxsw_sp->router = router;
+ router->mlxsw_sp = mlxsw_sp;
+
+ INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_neighs_list);
err = __mlxsw_sp_router_init(mlxsw_sp);
if (err)
- return err;
+ goto err_router_init;
+
+ err = mlxsw_sp_rifs_init(mlxsw_sp);
+ if (err)
+ goto err_rifs_init;
- err = rhashtable_init(&mlxsw_sp->router.nexthop_ht,
+ err = rhashtable_init(&mlxsw_sp->router->nexthop_ht,
&mlxsw_sp_nexthop_ht_params);
if (err)
goto err_nexthop_ht_init;
- err = rhashtable_init(&mlxsw_sp->router.nexthop_group_ht,
+ err = rhashtable_init(&mlxsw_sp->router->nexthop_group_ht,
&mlxsw_sp_nexthop_group_ht_params);
if (err)
goto err_nexthop_group_ht_init;
@@ -3580,8 +3751,8 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
if (err)
goto err_neigh_init;
- mlxsw_sp->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
- err = register_fib_notifier(&mlxsw_sp->fib_nb,
+ mlxsw_sp->router->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
+ err = register_fib_notifier(&mlxsw_sp->router->fib_nb,
mlxsw_sp_router_fib_dump_flush);
if (err)
goto err_register_fib_notifier;
@@ -3595,21 +3766,27 @@ err_neigh_init:
err_vrs_init:
mlxsw_sp_lpm_fini(mlxsw_sp);
err_lpm_init:
- rhashtable_destroy(&mlxsw_sp->router.nexthop_group_ht);
+ rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
err_nexthop_group_ht_init:
- rhashtable_destroy(&mlxsw_sp->router.nexthop_ht);
+ rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
err_nexthop_ht_init:
+ mlxsw_sp_rifs_fini(mlxsw_sp);
+err_rifs_init:
__mlxsw_sp_router_fini(mlxsw_sp);
+err_router_init:
+ kfree(mlxsw_sp->router);
return err;
}
void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
{
- unregister_fib_notifier(&mlxsw_sp->fib_nb);
+ unregister_fib_notifier(&mlxsw_sp->router->fib_nb);
mlxsw_sp_neigh_fini(mlxsw_sp);
mlxsw_sp_vrs_fini(mlxsw_sp);
mlxsw_sp_lpm_fini(mlxsw_sp);
- rhashtable_destroy(&mlxsw_sp->router.nexthop_group_ht);
- rhashtable_destroy(&mlxsw_sp->router.nexthop_ht);
+ rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
+ rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
+ mlxsw_sp_rifs_fini(mlxsw_sp);
__mlxsw_sp_router_fini(mlxsw_sp);
+ kfree(mlxsw_sp->router);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
index c3095fef6697..a3e8d2b25148 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
@@ -42,6 +42,8 @@ enum mlxsw_sp_rif_counter_dir {
MLXSW_SP_RIF_COUNTER_EGRESS,
};
+struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp,
+ u16 rif_index);
u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif);
int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif);
int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index f4bb0c0b7c1d..cd89a3e6cd81 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -52,340 +52,597 @@
#include "core.h"
#include "reg.h"
-static u16 mlxsw_sp_port_vid_to_fid_get(struct mlxsw_sp_port *mlxsw_sp_port,
- u16 vid)
-{
- struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_port);
- u16 fid = vid;
+struct mlxsw_sp_bridge_ops;
- fid = f ? f->fid : fid;
+struct mlxsw_sp_bridge {
+ struct mlxsw_sp *mlxsw_sp;
+ struct {
+ struct delayed_work dw;
+#define MLXSW_SP_DEFAULT_LEARNING_INTERVAL 100
+ unsigned int interval; /* ms */
+ } fdb_notify;
+#define MLXSW_SP_MIN_AGEING_TIME 10
+#define MLXSW_SP_MAX_AGEING_TIME 1000000
+#define MLXSW_SP_DEFAULT_AGEING_TIME 300
+ u32 ageing_time;
+ bool vlan_enabled_exists;
+ struct list_head bridges_list;
+ struct list_head mids_list;
+ DECLARE_BITMAP(mids_bitmap, MLXSW_SP_MID_MAX);
+ const struct mlxsw_sp_bridge_ops *bridge_8021q_ops;
+ const struct mlxsw_sp_bridge_ops *bridge_8021d_ops;
+};
+
+struct mlxsw_sp_bridge_device {
+ struct net_device *dev;
+ struct list_head list;
+ struct list_head ports_list;
+ u8 vlan_enabled:1,
+ multicast_enabled:1;
+ const struct mlxsw_sp_bridge_ops *ops;
+};
- if (!fid)
- fid = mlxsw_sp_port->pvid;
+struct mlxsw_sp_bridge_port {
+ struct net_device *dev;
+ struct mlxsw_sp_bridge_device *bridge_device;
+ struct list_head list;
+ struct list_head vlans_list;
+ unsigned int ref_count;
+ u8 stp_state;
+ unsigned long flags;
+ bool mrouter;
+ bool lagged;
+ union {
+ u16 lag_id;
+ u16 system_port;
+ };
+};
- return fid;
+struct mlxsw_sp_bridge_vlan {
+ struct list_head list;
+ struct list_head port_vlan_list;
+ u16 vid;
+};
+
+struct mlxsw_sp_bridge_ops {
+ int (*port_join)(struct mlxsw_sp_bridge_device *bridge_device,
+ struct mlxsw_sp_bridge_port *bridge_port,
+ struct mlxsw_sp_port *mlxsw_sp_port);
+ void (*port_leave)(struct mlxsw_sp_bridge_device *bridge_device,
+ struct mlxsw_sp_bridge_port *bridge_port,
+ struct mlxsw_sp_port *mlxsw_sp_port);
+ struct mlxsw_sp_fid *
+ (*fid_get)(struct mlxsw_sp_bridge_device *bridge_device,
+ u16 vid);
+};
+
+static int
+mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_bridge_port *bridge_port,
+ u16 fid_index);
+
+static struct mlxsw_sp_bridge_device *
+mlxsw_sp_bridge_device_find(const struct mlxsw_sp_bridge *bridge,
+ const struct net_device *br_dev)
+{
+ struct mlxsw_sp_bridge_device *bridge_device;
+
+ list_for_each_entry(bridge_device, &bridge->bridges_list, list)
+ if (bridge_device->dev == br_dev)
+ return bridge_device;
+
+ return NULL;
}
-static struct mlxsw_sp_port *
-mlxsw_sp_port_orig_get(struct net_device *dev,
- struct mlxsw_sp_port *mlxsw_sp_port)
+static struct mlxsw_sp_bridge_device *
+mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge,
+ struct net_device *br_dev)
{
- struct mlxsw_sp_port *mlxsw_sp_vport;
- struct mlxsw_sp_fid *fid;
- u16 vid;
+ struct device *dev = bridge->mlxsw_sp->bus_info->dev;
+ struct mlxsw_sp_bridge_device *bridge_device;
+ bool vlan_enabled = br_vlan_enabled(br_dev);
- if (netif_is_bridge_master(dev)) {
- fid = mlxsw_sp_vfid_find(mlxsw_sp_port->mlxsw_sp,
- dev);
- if (fid) {
- mlxsw_sp_vport =
- mlxsw_sp_port_vport_find_by_fid(mlxsw_sp_port,
- fid->fid);
- WARN_ON(!mlxsw_sp_vport);
- return mlxsw_sp_vport;
- }
+ if (vlan_enabled && bridge->vlan_enabled_exists) {
+ dev_err(dev, "Only one VLAN-aware bridge is supported\n");
+ return ERR_PTR(-EINVAL);
}
- if (!is_vlan_dev(dev))
- return mlxsw_sp_port;
+ bridge_device = kzalloc(sizeof(*bridge_device), GFP_KERNEL);
+ if (!bridge_device)
+ return ERR_PTR(-ENOMEM);
+
+ bridge_device->dev = br_dev;
+ bridge_device->vlan_enabled = vlan_enabled;
+ bridge_device->multicast_enabled = br_multicast_enabled(br_dev);
+ INIT_LIST_HEAD(&bridge_device->ports_list);
+ if (vlan_enabled) {
+ bridge->vlan_enabled_exists = true;
+ bridge_device->ops = bridge->bridge_8021q_ops;
+ } else {
+ bridge_device->ops = bridge->bridge_8021d_ops;
+ }
+ list_add(&bridge_device->list, &bridge->bridges_list);
- vid = vlan_dev_vlan_id(dev);
- mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
- WARN_ON(!mlxsw_sp_vport);
+ return bridge_device;
+}
- return mlxsw_sp_vport;
+static void
+mlxsw_sp_bridge_device_destroy(struct mlxsw_sp_bridge *bridge,
+ struct mlxsw_sp_bridge_device *bridge_device)
+{
+ list_del(&bridge_device->list);
+ if (bridge_device->vlan_enabled)
+ bridge->vlan_enabled_exists = false;
+ WARN_ON(!list_empty(&bridge_device->ports_list));
+ kfree(bridge_device);
}
-static int mlxsw_sp_port_attr_get(struct net_device *dev,
- struct switchdev_attr *attr)
+static struct mlxsw_sp_bridge_device *
+mlxsw_sp_bridge_device_get(struct mlxsw_sp_bridge *bridge,
+ struct net_device *br_dev)
{
- struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_bridge_device *bridge_device;
- mlxsw_sp_port = mlxsw_sp_port_orig_get(attr->orig_dev, mlxsw_sp_port);
- if (!mlxsw_sp_port)
- return -EINVAL;
+ bridge_device = mlxsw_sp_bridge_device_find(bridge, br_dev);
+ if (bridge_device)
+ return bridge_device;
- switch (attr->id) {
- case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
- attr->u.ppid.id_len = sizeof(mlxsw_sp->base_mac);
- memcpy(&attr->u.ppid.id, &mlxsw_sp->base_mac,
- attr->u.ppid.id_len);
- break;
- case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
- attr->u.brport_flags =
- (mlxsw_sp_port->learning ? BR_LEARNING : 0) |
- (mlxsw_sp_port->learning_sync ? BR_LEARNING_SYNC : 0) |
- (mlxsw_sp_port->uc_flood ? BR_FLOOD : 0);
- break;
- default:
- return -EOPNOTSUPP;
+ return mlxsw_sp_bridge_device_create(bridge, br_dev);
+}
+
+static void
+mlxsw_sp_bridge_device_put(struct mlxsw_sp_bridge *bridge,
+ struct mlxsw_sp_bridge_device *bridge_device)
+{
+ if (list_empty(&bridge_device->ports_list))
+ mlxsw_sp_bridge_device_destroy(bridge, bridge_device);
+}
+
+static struct mlxsw_sp_bridge_port *
+__mlxsw_sp_bridge_port_find(const struct mlxsw_sp_bridge_device *bridge_device,
+ const struct net_device *brport_dev)
+{
+ struct mlxsw_sp_bridge_port *bridge_port;
+
+ list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
+ if (bridge_port->dev == brport_dev)
+ return bridge_port;
}
- return 0;
+ return NULL;
}
-static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
- u8 state)
+static struct mlxsw_sp_bridge_port *
+mlxsw_sp_bridge_port_find(struct mlxsw_sp_bridge *bridge,
+ struct net_device *brport_dev)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- enum mlxsw_reg_spms_state spms_state;
- char *spms_pl;
- u16 vid;
+ struct net_device *br_dev = netdev_master_upper_dev_get(brport_dev);
+ struct mlxsw_sp_bridge_device *bridge_device;
+
+ if (!br_dev)
+ return NULL;
+
+ bridge_device = mlxsw_sp_bridge_device_find(bridge, br_dev);
+ if (!bridge_device)
+ return NULL;
+
+ return __mlxsw_sp_bridge_port_find(bridge_device, brport_dev);
+}
+
+static struct mlxsw_sp_bridge_port *
+mlxsw_sp_bridge_port_create(struct mlxsw_sp_bridge_device *bridge_device,
+ struct net_device *brport_dev)
+{
+ struct mlxsw_sp_bridge_port *bridge_port;
+ struct mlxsw_sp_port *mlxsw_sp_port;
+
+ bridge_port = kzalloc(sizeof(*bridge_port), GFP_KERNEL);
+ if (!bridge_port)
+ return NULL;
+
+ mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(brport_dev);
+ bridge_port->lagged = mlxsw_sp_port->lagged;
+ if (bridge_port->lagged)
+ bridge_port->lag_id = mlxsw_sp_port->lag_id;
+ else
+ bridge_port->system_port = mlxsw_sp_port->local_port;
+ bridge_port->dev = brport_dev;
+ bridge_port->bridge_device = bridge_device;
+ bridge_port->stp_state = BR_STATE_DISABLED;
+ bridge_port->flags = BR_LEARNING | BR_FLOOD | BR_LEARNING_SYNC;
+ INIT_LIST_HEAD(&bridge_port->vlans_list);
+ list_add(&bridge_port->list, &bridge_device->ports_list);
+ bridge_port->ref_count = 1;
+
+ return bridge_port;
+}
+
+static void
+mlxsw_sp_bridge_port_destroy(struct mlxsw_sp_bridge_port *bridge_port)
+{
+ list_del(&bridge_port->list);
+ WARN_ON(!list_empty(&bridge_port->vlans_list));
+ kfree(bridge_port);
+}
+
+static bool
+mlxsw_sp_bridge_port_should_destroy(const struct mlxsw_sp_bridge_port *
+ bridge_port)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_port->dev);
+
+ /* In case ports were pulled out of a bridged LAG, it's possible
+ * the reference count isn't zero, yet the bridge port should still
+ * be destroyed, as it's no longer an upper of ours.
+ */
+ if (!mlxsw_sp && list_empty(&bridge_port->vlans_list))
+ return true;
+ else if (bridge_port->ref_count == 0)
+ return true;
+ else
+ return false;
+}
+
+static struct mlxsw_sp_bridge_port *
+mlxsw_sp_bridge_port_get(struct mlxsw_sp_bridge *bridge,
+ struct net_device *brport_dev)
+{
+ struct net_device *br_dev = netdev_master_upper_dev_get(brport_dev);
+ struct mlxsw_sp_bridge_device *bridge_device;
+ struct mlxsw_sp_bridge_port *bridge_port;
int err;
- switch (state) {
- case BR_STATE_FORWARDING:
- spms_state = MLXSW_REG_SPMS_STATE_FORWARDING;
- break;
- case BR_STATE_LEARNING:
- spms_state = MLXSW_REG_SPMS_STATE_LEARNING;
- break;
- case BR_STATE_LISTENING: /* fall-through */
- case BR_STATE_DISABLED: /* fall-through */
- case BR_STATE_BLOCKING:
- spms_state = MLXSW_REG_SPMS_STATE_DISCARDING;
- break;
- default:
- BUG();
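+ /* Bridge ports are reference counted; re-use an existing one if found */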
+ bridge_port = mlxsw_sp_bridge_port_find(bridge, brport_dev);
+ if (bridge_port) {
+ bridge_port->ref_count++;
+ return bridge_port;
}
- spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
- if (!spms_pl)
- return -ENOMEM;
- mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
+ bridge_device = mlxsw_sp_bridge_device_get(bridge, br_dev);
+ if (IS_ERR(bridge_device))
+ return ERR_CAST(bridge_device);
- if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
- vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
- mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
- } else {
- for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID)
- mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
+ bridge_port = mlxsw_sp_bridge_port_create(bridge_device, brport_dev);
+ if (!bridge_port) {
+ err = -ENOMEM;
+ goto err_bridge_port_create;
}
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
- kfree(spms_pl);
- return err;
+ return bridge_port;
+
+err_bridge_port_create:
+ mlxsw_sp_bridge_device_put(bridge, bridge_device);
+ return ERR_PTR(err);
}
-static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
- struct switchdev_trans *trans,
- u8 state)
+static void mlxsw_sp_bridge_port_put(struct mlxsw_sp_bridge *bridge,
+ struct mlxsw_sp_bridge_port *bridge_port)
{
- if (switchdev_trans_ph_prepare(trans))
- return 0;
+ struct mlxsw_sp_bridge_device *bridge_device;
- mlxsw_sp_port->stp_state = state;
- return mlxsw_sp_port_stp_state_set(mlxsw_sp_port, state);
+ bridge_port->ref_count--;
+ if (!mlxsw_sp_bridge_port_should_destroy(bridge_port))
+ return;
+ bridge_device = bridge_port->bridge_device;
+ mlxsw_sp_bridge_port_destroy(bridge_port);
+ mlxsw_sp_bridge_device_put(bridge, bridge_device);
}
-static int __mlxsw_sp_port_flood_table_set(struct mlxsw_sp_port *mlxsw_sp_port,
- u16 idx_begin, u16 idx_end,
- enum mlxsw_sp_flood_table table,
- bool set)
+static struct mlxsw_sp_port_vlan *
+mlxsw_sp_port_vlan_find_by_bridge(struct mlxsw_sp_port *mlxsw_sp_port,
+ const struct mlxsw_sp_bridge_device *
+ bridge_device,
+ u16 vid)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- u16 local_port = mlxsw_sp_port->local_port;
- enum mlxsw_flood_table_type table_type;
- u16 range = idx_end - idx_begin + 1;
- char *sftr_pl;
- int err;
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
- if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
- table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID;
- else
- table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
+ list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
+ list) {
+ if (!mlxsw_sp_port_vlan->bridge_port)
+ continue;
+ if (mlxsw_sp_port_vlan->bridge_port->bridge_device !=
+ bridge_device)
+ continue;
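+ /* A VLAN-unaware bridge spans a single FID, so only match the VID when the bridge is VLAN-aware */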
+ if (bridge_device->vlan_enabled &&
+ mlxsw_sp_port_vlan->vid != vid)
+ continue;
+ return mlxsw_sp_port_vlan;
+ }
- sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
- if (!sftr_pl)
- return -ENOMEM;
+ return NULL;
+}
- mlxsw_reg_sftr_pack(sftr_pl, table, idx_begin,
- table_type, range, local_port, set);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
+static struct mlxsw_sp_port_vlan *
+mlxsw_sp_port_vlan_find_by_fid(struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 fid_index)
+{
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
- kfree(sftr_pl);
- return err;
+ list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
+ list) {
+ struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
+
+ if (fid && mlxsw_sp_fid_index(fid) == fid_index)
+ return mlxsw_sp_port_vlan;
+ }
+
+ return NULL;
}
-static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
- u16 idx_begin, u16 idx_end, bool uc_set,
- bool bc_set, bool mc_set)
+static struct mlxsw_sp_bridge_vlan *
+mlxsw_sp_bridge_vlan_find(const struct mlxsw_sp_bridge_port *bridge_port,
+ u16 vid)
{
- int err;
+ struct mlxsw_sp_bridge_vlan *bridge_vlan;
- err = __mlxsw_sp_port_flood_table_set(mlxsw_sp_port, idx_begin, idx_end,
- MLXSW_SP_FLOOD_TABLE_UC, uc_set);
- if (err)
- return err;
+ list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
+ if (bridge_vlan->vid == vid)
+ return bridge_vlan;
+ }
- err = __mlxsw_sp_port_flood_table_set(mlxsw_sp_port, idx_begin, idx_end,
- MLXSW_SP_FLOOD_TABLE_BC, bc_set);
- if (err)
- goto err_flood_bm_set;
+ return NULL;
+}
- err = __mlxsw_sp_port_flood_table_set(mlxsw_sp_port, idx_begin, idx_end,
- MLXSW_SP_FLOOD_TABLE_MC, mc_set);
- if (err)
- goto err_flood_mc_set;
- return 0;
+static struct mlxsw_sp_bridge_vlan *
+mlxsw_sp_bridge_vlan_create(struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
+{
+ struct mlxsw_sp_bridge_vlan *bridge_vlan;
-err_flood_mc_set:
- __mlxsw_sp_port_flood_table_set(mlxsw_sp_port, idx_begin, idx_end,
- MLXSW_SP_FLOOD_TABLE_BC, !bc_set);
-err_flood_bm_set:
- __mlxsw_sp_port_flood_table_set(mlxsw_sp_port, idx_begin, idx_end,
- MLXSW_SP_FLOOD_TABLE_UC, !uc_set);
- return err;
+ bridge_vlan = kzalloc(sizeof(*bridge_vlan), GFP_KERNEL);
+ if (!bridge_vlan)
+ return NULL;
+
+ INIT_LIST_HEAD(&bridge_vlan->port_vlan_list);
+ bridge_vlan->vid = vid;
+ list_add(&bridge_vlan->list, &bridge_port->vlans_list);
+
+ return bridge_vlan;
}
-static int mlxsw_sp_port_flood_table_set(struct mlxsw_sp_port *mlxsw_sp_port,
- enum mlxsw_sp_flood_table table,
- bool set)
+static void
+mlxsw_sp_bridge_vlan_destroy(struct mlxsw_sp_bridge_vlan *bridge_vlan)
{
- struct net_device *dev = mlxsw_sp_port->dev;
- u16 vid, last_visited_vid;
- int err;
+ list_del(&bridge_vlan->list);
+ WARN_ON(!list_empty(&bridge_vlan->port_vlan_list));
+ kfree(bridge_vlan);
+}
- if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
- u16 fid = mlxsw_sp_vport_fid_get(mlxsw_sp_port)->fid;
- u16 vfid = mlxsw_sp_fid_to_vfid(fid);
+static struct mlxsw_sp_bridge_vlan *
+mlxsw_sp_bridge_vlan_get(struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
+{
+ struct mlxsw_sp_bridge_vlan *bridge_vlan;
- return __mlxsw_sp_port_flood_table_set(mlxsw_sp_port, vfid,
- vfid, table, set);
- }
+ bridge_vlan = mlxsw_sp_bridge_vlan_find(bridge_port, vid);
+ if (bridge_vlan)
+ return bridge_vlan;
- for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
- err = __mlxsw_sp_port_flood_table_set(mlxsw_sp_port, vid, vid,
- table, set);
- if (err) {
- last_visited_vid = vid;
- goto err_port_flood_set;
- }
+ return mlxsw_sp_bridge_vlan_create(bridge_port, vid);
+}
+
+static void mlxsw_sp_bridge_vlan_put(struct mlxsw_sp_bridge_vlan *bridge_vlan)
+{
+ if (list_empty(&bridge_vlan->port_vlan_list))
+ mlxsw_sp_bridge_vlan_destroy(bridge_vlan);
+}
+
+static void mlxsw_sp_port_bridge_flags_get(struct mlxsw_sp_bridge *bridge,
+ struct net_device *dev,
+ unsigned long *brport_flags)
+{
+ struct mlxsw_sp_bridge_port *bridge_port;
+
+ bridge_port = mlxsw_sp_bridge_port_find(bridge, dev);
+ if (WARN_ON(!bridge_port))
+ return;
+
+ memcpy(brport_flags, &bridge_port->flags, sizeof(*brport_flags));
+}
+
+static int mlxsw_sp_port_attr_get(struct net_device *dev,
+ struct switchdev_attr *attr)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+
+ switch (attr->id) {
+ case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
+ attr->u.ppid.id_len = sizeof(mlxsw_sp->base_mac);
+ memcpy(&attr->u.ppid.id, &mlxsw_sp->base_mac,
+ attr->u.ppid.id_len);
+ break;
+ case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
+ mlxsw_sp_port_bridge_flags_get(mlxsw_sp->bridge, attr->orig_dev,
+ &attr->u.brport_flags);
+ break;
+ case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS_SUPPORT:
+ attr->u.brport_flags_support = BR_LEARNING | BR_FLOOD;
+ break;
+ default:
+ return -EOPNOTSUPP;
}
return 0;
+}
-err_port_flood_set:
- for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid)
- __mlxsw_sp_port_flood_table_set(mlxsw_sp_port, vid, vid, table,
- !set);
- netdev_err(dev, "Failed to configure unicast flooding\n");
- return err;
+static int
+mlxsw_sp_port_bridge_vlan_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_bridge_vlan *bridge_vlan,
+ u8 state)
+{
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
+
+ list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
+ bridge_vlan_node) {
+ if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
+ continue;
+ return mlxsw_sp_port_vid_stp_set(mlxsw_sp_port,
+ bridge_vlan->vid, state);
+ }
+
+ return 0;
}
-static int mlxsw_sp_port_mc_disabled_set(struct mlxsw_sp_port *mlxsw_sp_port,
- struct switchdev_trans *trans,
- bool mc_disabled)
+static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct switchdev_trans *trans,
+ struct net_device *orig_dev,
+ u8 state)
{
- int set;
- int err = 0;
+ struct mlxsw_sp_bridge_port *bridge_port;
+ struct mlxsw_sp_bridge_vlan *bridge_vlan;
+ int err;
if (switchdev_trans_ph_prepare(trans))
return 0;
- if (mlxsw_sp_port->mc_router != mlxsw_sp_port->mc_flood) {
- set = mc_disabled ?
- mlxsw_sp_port->mc_flood : mlxsw_sp_port->mc_router;
- err = mlxsw_sp_port_flood_table_set(mlxsw_sp_port,
- MLXSW_SP_FLOOD_TABLE_MC,
- set);
+ /* It's possible we failed to enslave the port, yet this
+ * operation is still executed because it was deferred.
+ */
+ bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
+ orig_dev);
+ if (!bridge_port)
+ return 0;
+
+ list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
+ err = mlxsw_sp_port_bridge_vlan_stp_set(mlxsw_sp_port,
+ bridge_vlan, state);
+ if (err)
+ goto err_port_bridge_vlan_stp_set;
}
- if (!err)
- mlxsw_sp_port->mc_disabled = mc_disabled;
+ bridge_port->stp_state = state;
+ return 0;
+
+err_port_bridge_vlan_stp_set:
+ list_for_each_entry_continue_reverse(bridge_vlan,
+ &bridge_port->vlans_list, list)
+ mlxsw_sp_port_bridge_vlan_stp_set(mlxsw_sp_port, bridge_vlan,
+ bridge_port->stp_state);
return err;
}
-int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
- bool set)
+static int
+mlxsw_sp_port_bridge_vlan_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_bridge_vlan *bridge_vlan,
+ enum mlxsw_sp_flood_type packet_type,
+ bool member)
{
- bool mc_set = set;
- u16 vfid;
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
- /* In case of vFIDs, index into the flooding table is relative to
- * the start of the vFIDs range.
- */
- vfid = mlxsw_sp_fid_to_vfid(fid);
-
- if (set)
- mc_set = mlxsw_sp_vport->mc_disabled ?
- mlxsw_sp_vport->mc_flood : mlxsw_sp_vport->mc_router;
+ list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
+ bridge_vlan_node) {
+ if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
+ continue;
+ return mlxsw_sp_fid_flood_set(mlxsw_sp_port_vlan->fid,
+ packet_type,
+ mlxsw_sp_port->local_port,
+ member);
+ }
- return __mlxsw_sp_port_flood_set(mlxsw_sp_vport, vfid, vfid, set, set,
- mc_set);
+ return 0;
}
-static int mlxsw_sp_port_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
- bool set)
+static int
+mlxsw_sp_bridge_port_flood_table_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_bridge_port *bridge_port,
+ enum mlxsw_sp_flood_type packet_type,
+ bool member)
{
- u16 vid;
+ struct mlxsw_sp_bridge_vlan *bridge_vlan;
int err;
- if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
- vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
+ list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
+ err = mlxsw_sp_port_bridge_vlan_flood_set(mlxsw_sp_port,
+ bridge_vlan,
+ packet_type,
+ member);
+ if (err)
+ goto err_port_bridge_vlan_flood_set;
+ }
+
+ return 0;
+
+err_port_bridge_vlan_flood_set:
+ list_for_each_entry_continue_reverse(bridge_vlan,
+ &bridge_port->vlans_list, list)
+ mlxsw_sp_port_bridge_vlan_flood_set(mlxsw_sp_port, bridge_vlan,
+ packet_type, !member);
+ return err;
+}
- return __mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, vid,
- set);
+static int
+mlxsw_sp_port_bridge_vlan_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_bridge_vlan *bridge_vlan,
+ bool set)
+{
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
+ u16 vid = bridge_vlan->vid;
+
+ list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
+ bridge_vlan_node) {
+ if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
+ continue;
+ return mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, set);
}
- for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
- err = __mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, vid,
- set);
+ return 0;
+}
+
+static int
+mlxsw_sp_bridge_port_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_bridge_port *bridge_port,
+ bool set)
+{
+ struct mlxsw_sp_bridge_vlan *bridge_vlan;
+ int err;
+
+ list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
+ err = mlxsw_sp_port_bridge_vlan_learning_set(mlxsw_sp_port,
+ bridge_vlan, set);
if (err)
- goto err_port_vid_learning_set;
+ goto err_port_bridge_vlan_learning_set;
}
return 0;
-err_port_vid_learning_set:
- for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID)
- __mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, vid, !set);
+err_port_bridge_vlan_learning_set:
+ list_for_each_entry_continue_reverse(bridge_vlan,
+ &bridge_port->vlans_list, list)
+ mlxsw_sp_port_bridge_vlan_learning_set(mlxsw_sp_port,
+ bridge_vlan, !set);
return err;
}
static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
struct switchdev_trans *trans,
+ struct net_device *orig_dev,
unsigned long brport_flags)
{
- unsigned long learning = mlxsw_sp_port->learning ? BR_LEARNING : 0;
- unsigned long uc_flood = mlxsw_sp_port->uc_flood ? BR_FLOOD : 0;
+ struct mlxsw_sp_bridge_port *bridge_port;
int err;
- if (!mlxsw_sp_port->bridged)
- return -EINVAL;
-
if (switchdev_trans_ph_prepare(trans))
return 0;
- if ((uc_flood ^ brport_flags) & BR_FLOOD) {
- err = mlxsw_sp_port_flood_table_set(mlxsw_sp_port,
- MLXSW_SP_FLOOD_TABLE_UC,
- !mlxsw_sp_port->uc_flood);
- if (err)
- return err;
- }
+ bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
+ orig_dev);
+ if (WARN_ON(!bridge_port))
+ return -EINVAL;
- if ((learning ^ brport_flags) & BR_LEARNING) {
- err = mlxsw_sp_port_learning_set(mlxsw_sp_port,
- !mlxsw_sp_port->learning);
- if (err)
- goto err_port_learning_set;
- }
+ err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
+ MLXSW_SP_FLOOD_TYPE_UC,
+ brport_flags & BR_FLOOD);
+ if (err)
+ return err;
- mlxsw_sp_port->uc_flood = brport_flags & BR_FLOOD ? 1 : 0;
- mlxsw_sp_port->learning = brport_flags & BR_LEARNING ? 1 : 0;
- mlxsw_sp_port->learning_sync = brport_flags & BR_LEARNING_SYNC ? 1 : 0;
+ err = mlxsw_sp_bridge_port_learning_set(mlxsw_sp_port, bridge_port,
+ brport_flags & BR_LEARNING);
+ if (err)
+ return err;
- return 0;
+ memcpy(&bridge_port->flags, &brport_flags, sizeof(brport_flags));
-err_port_learning_set:
- if ((uc_flood ^ brport_flags) & BR_FLOOD)
- mlxsw_sp_port_flood_table_set(mlxsw_sp_port,
- MLXSW_SP_FLOOD_TABLE_UC,
- mlxsw_sp_port->uc_flood);
- return err;
+ return 0;
}
static int mlxsw_sp_ageing_set(struct mlxsw_sp *mlxsw_sp, u32 ageing_time)
@@ -397,7 +654,7 @@ static int mlxsw_sp_ageing_set(struct mlxsw_sp *mlxsw_sp, u32 ageing_time)
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdat), sfdat_pl);
if (err)
return err;
- mlxsw_sp->ageing_time = ageing_time;
+ mlxsw_sp->bridge->ageing_time = ageing_time;
return 0;
}
@@ -426,28 +683,77 @@ static int mlxsw_sp_port_attr_br_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
bool vlan_enabled)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_bridge_device *bridge_device;
- /* SWITCHDEV_TRANS_PREPARE phase */
- if ((!vlan_enabled) && (mlxsw_sp->master_bridge.dev == orig_dev)) {
- netdev_err(mlxsw_sp_port->dev, "Bridge must be vlan-aware\n");
+ if (!switchdev_trans_ph_prepare(trans))
+ return 0;
+
+ bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
+ if (WARN_ON(!bridge_device))
return -EINVAL;
- }
- return 0;
+ if (bridge_device->vlan_enabled == vlan_enabled)
+ return 0;
+
+ netdev_err(bridge_device->dev, "VLAN filtering can't be changed for existing bridge\n");
+ return -EINVAL;
}
static int mlxsw_sp_port_attr_mc_router_set(struct mlxsw_sp_port *mlxsw_sp_port,
struct switchdev_trans *trans,
+ struct net_device *orig_dev,
bool is_port_mc_router)
{
+ struct mlxsw_sp_bridge_port *bridge_port;
+
+ if (switchdev_trans_ph_prepare(trans))
+ return 0;
+
+ bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
+ orig_dev);
+ if (WARN_ON(!bridge_port))
+ return -EINVAL;
+
+ if (!bridge_port->bridge_device->multicast_enabled)
+ return 0;
+
+ return mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
+ MLXSW_SP_FLOOD_TYPE_MC,
+ is_port_mc_router);
+}
+
+static int mlxsw_sp_port_mc_disabled_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct switchdev_trans *trans,
+ struct net_device *orig_dev,
+ bool mc_disabled)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_bridge_device *bridge_device;
+ struct mlxsw_sp_bridge_port *bridge_port;
+ int err;
+
if (switchdev_trans_ph_prepare(trans))
return 0;
- mlxsw_sp_port->mc_router = is_port_mc_router;
- if (!mlxsw_sp_port->mc_disabled)
- return mlxsw_sp_port_flood_table_set(mlxsw_sp_port,
- MLXSW_SP_FLOOD_TABLE_MC,
- is_port_mc_router);
+ /* It is possible we failed to enslave the port, yet this
+ * operation is still executed because it was deferred.
+ */
+ bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
+ if (!bridge_device)
+ return 0;
+
+ list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
+ enum mlxsw_sp_flood_type packet_type = MLXSW_SP_FLOOD_TYPE_MC;
+ bool member = mc_disabled ? true : bridge_port->mrouter;
+
+ err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port,
+ bridge_port,
+ packet_type, member);
+ if (err)
+ return err;
+ }
+
+ bridge_device->multicast_enabled = !mc_disabled;
return 0;
}
@@ -457,19 +763,17 @@ static int mlxsw_sp_port_attr_set(struct net_device *dev,
struct switchdev_trans *trans)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
- int err = 0;
-
- mlxsw_sp_port = mlxsw_sp_port_orig_get(attr->orig_dev, mlxsw_sp_port);
- if (!mlxsw_sp_port)
- return -EINVAL;
+ int err;
switch (attr->id) {
case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
err = mlxsw_sp_port_attr_stp_state_set(mlxsw_sp_port, trans,
+ attr->orig_dev,
attr->u.stp_state);
break;
case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
err = mlxsw_sp_port_attr_br_flags_set(mlxsw_sp_port, trans,
+ attr->orig_dev,
attr->u.brport_flags);
break;
case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
@@ -483,10 +787,12 @@ static int mlxsw_sp_port_attr_set(struct net_device *dev,
break;
case SWITCHDEV_ATTR_ID_PORT_MROUTER:
err = mlxsw_sp_port_attr_mc_router_set(mlxsw_sp_port, trans,
+ attr->orig_dev,
attr->u.mrouter);
break;
case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED:
err = mlxsw_sp_port_mc_disabled_set(mlxsw_sp_port, trans,
+ attr->orig_dev,
attr->u.mc_disabled);
break;
default:
@@ -497,268 +803,243 @@ static int mlxsw_sp_port_attr_set(struct net_device *dev,
return err;
}
-static int mlxsw_sp_fid_op(struct mlxsw_sp *mlxsw_sp, u16 fid, bool create)
+static bool mlxsw_sp_mc_flood(const struct mlxsw_sp_bridge_port *bridge_port)
{
- char sfmr_pl[MLXSW_REG_SFMR_LEN];
+ const struct mlxsw_sp_bridge_device *bridge_device;
- mlxsw_reg_sfmr_pack(sfmr_pl, !create, fid, fid);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
+ bridge_device = bridge_port->bridge_device;
+ return !bridge_device->multicast_enabled ? true : bridge_port->mrouter;
}
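/*
 * Editor's note: the helper above encodes the multicast flooding policy.
 * With multicast snooping disabled on the bridge, every port floods
 * multicast; with snooping enabled, only mrouter ports do:
 *
 *	multicast_enabled	mrouter		flood MC to port
 *	false			any		true
 *	true			true		true
 *	true			false		false
 */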
-static int mlxsw_sp_fid_map(struct mlxsw_sp *mlxsw_sp, u16 fid, bool valid)
-{
- enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_VID_TO_FID;
- char svfa_pl[MLXSW_REG_SVFA_LEN];
-
- mlxsw_reg_svfa_pack(svfa_pl, 0, mt, valid, fid, fid);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
-}
-
-static struct mlxsw_sp_fid *mlxsw_sp_fid_alloc(u16 fid)
+static int
+mlxsw_sp_port_vlan_fid_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
+ struct mlxsw_sp_bridge_port *bridge_port)
{
- struct mlxsw_sp_fid *f;
-
- f = kzalloc(sizeof(*f), GFP_KERNEL);
- if (!f)
- return NULL;
+ struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
+ struct mlxsw_sp_bridge_device *bridge_device;
+ u8 local_port = mlxsw_sp_port->local_port;
+ u16 vid = mlxsw_sp_port_vlan->vid;
+ struct mlxsw_sp_fid *fid;
+ int err;
- f->fid = fid;
+ bridge_device = bridge_port->bridge_device;
+ fid = bridge_device->ops->fid_get(bridge_device, vid);
+ if (IS_ERR(fid))
+ return PTR_ERR(fid);
- return f;
-}
-
-struct mlxsw_sp_fid *mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp, u16 fid)
-{
- struct mlxsw_sp_fid *f;
- int err;
+ err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port,
+ bridge_port->flags & BR_FLOOD);
+ if (err)
+ goto err_fid_uc_flood_set;
- err = mlxsw_sp_fid_op(mlxsw_sp, fid, true);
+ err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port,
+ mlxsw_sp_mc_flood(bridge_port));
if (err)
- return ERR_PTR(err);
+ goto err_fid_mc_flood_set;
- /* Although all the ports member in the FID might be using a
- * {Port, VID} to FID mapping, we create a global VID-to-FID
- * mapping. This allows a port to transition to VLAN mode,
- * knowing the global mapping exists.
- */
- err = mlxsw_sp_fid_map(mlxsw_sp, fid, true);
+ err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port,
+ true);
if (err)
- goto err_fid_map;
+ goto err_fid_bc_flood_set;
- f = mlxsw_sp_fid_alloc(fid);
- if (!f) {
- err = -ENOMEM;
- goto err_allocate_fid;
- }
+ err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid);
+ if (err)
+ goto err_fid_port_vid_map;
- list_add(&f->list, &mlxsw_sp->fids);
+ mlxsw_sp_port_vlan->fid = fid;
- return f;
+ return 0;
-err_allocate_fid:
- mlxsw_sp_fid_map(mlxsw_sp, fid, false);
-err_fid_map:
- mlxsw_sp_fid_op(mlxsw_sp, fid, false);
- return ERR_PTR(err);
+err_fid_port_vid_map:
+ mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port, false);
+err_fid_bc_flood_set:
+ mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port, false);
+err_fid_mc_flood_set:
+ mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port, false);
+err_fid_uc_flood_set:
+ mlxsw_sp_fid_put(fid);
+ return err;
}
-void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *f)
+static void
+mlxsw_sp_port_vlan_fid_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
- u16 fid = f->fid;
+ struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
+ struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
+ u8 local_port = mlxsw_sp_port->local_port;
+ u16 vid = mlxsw_sp_port_vlan->vid;
- list_del(&f->list);
-
- if (f->rif)
- mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->rif);
-
- kfree(f);
-
- mlxsw_sp_fid_map(mlxsw_sp, fid, false);
+ mlxsw_sp_port_vlan->fid = NULL;
+ mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
+ mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port, false);
+ mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port, false);
+ mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port, false);
+ mlxsw_sp_fid_put(fid);
+}
- mlxsw_sp_fid_op(mlxsw_sp, fid, false);
+static u16
+mlxsw_sp_port_pvid_determine(const struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 vid, bool is_pvid)
+{
+ if (is_pvid)
+ return vid;
+ else if (mlxsw_sp_port->pvid == vid)
+ return 0; /* Disallow untagged packets */
+ else
+ return mlxsw_sp_port->pvid;
}
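/*
 * Editor's note: illustrative outcomes of the PVID helper above, assuming
 * the port's current PVID is 10:
 *
 *	mlxsw_sp_port_pvid_determine(port, 20, true)  -> 20, VID becomes PVID
 *	mlxsw_sp_port_pvid_determine(port, 10, false) -> 0, untagged dropped
 *	mlxsw_sp_port_pvid_determine(port, 20, false) -> 10, PVID unchanged
 */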
-static int __mlxsw_sp_port_fid_join(struct mlxsw_sp_port *mlxsw_sp_port,
- u16 fid)
+static int
+mlxsw_sp_port_vlan_bridge_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
+ struct mlxsw_sp_bridge_port *bridge_port)
{
- struct mlxsw_sp_fid *f;
+ struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
+ struct mlxsw_sp_bridge_vlan *bridge_vlan;
+ u16 vid = mlxsw_sp_port_vlan->vid;
+ int err;
- if (test_bit(fid, mlxsw_sp_port->active_vlans))
+ /* No need to continue if only VLAN flags were changed */
+ if (mlxsw_sp_port_vlan->bridge_port)
return 0;
- f = mlxsw_sp_fid_find(mlxsw_sp_port->mlxsw_sp, fid);
- if (!f) {
- f = mlxsw_sp_fid_create(mlxsw_sp_port->mlxsw_sp, fid);
- if (IS_ERR(f))
- return PTR_ERR(f);
+ err = mlxsw_sp_port_vlan_fid_join(mlxsw_sp_port_vlan, bridge_port);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid,
+ bridge_port->flags & BR_LEARNING);
+ if (err)
+ goto err_port_vid_learning_set;
+
+ err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid,
+ bridge_port->stp_state);
+ if (err)
+ goto err_port_vid_stp_set;
+
+ bridge_vlan = mlxsw_sp_bridge_vlan_get(bridge_port, vid);
+ if (!bridge_vlan) {
+ err = -ENOMEM;
+ goto err_bridge_vlan_get;
}
- f->ref_count++;
+ list_add(&mlxsw_sp_port_vlan->bridge_vlan_node,
+ &bridge_vlan->port_vlan_list);
- netdev_dbg(mlxsw_sp_port->dev, "Joined FID=%d\n", fid);
+ mlxsw_sp_bridge_port_get(mlxsw_sp_port->mlxsw_sp->bridge,
+ bridge_port->dev);
+ mlxsw_sp_port_vlan->bridge_port = bridge_port;
return 0;
+
+err_bridge_vlan_get:
+ mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_DISABLED);
+err_port_vid_stp_set:
+ mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
+err_port_vid_learning_set:
+ mlxsw_sp_port_vlan_fid_leave(mlxsw_sp_port_vlan);
+ return err;
}
-static void __mlxsw_sp_port_fid_leave(struct mlxsw_sp_port *mlxsw_sp_port,
- u16 fid)
+void
+mlxsw_sp_port_vlan_bridge_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
- struct mlxsw_sp_fid *f;
+ struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
+ struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
+ struct mlxsw_sp_bridge_vlan *bridge_vlan;
+ struct mlxsw_sp_bridge_port *bridge_port;
+ u16 vid = mlxsw_sp_port_vlan->vid;
+ bool last;
- f = mlxsw_sp_fid_find(mlxsw_sp_port->mlxsw_sp, fid);
- if (WARN_ON(!f))
+ if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_8021Q &&
+ mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_8021D))
return;
- netdev_dbg(mlxsw_sp_port->dev, "Left FID=%d\n", fid);
+ bridge_port = mlxsw_sp_port_vlan->bridge_port;
+ bridge_vlan = mlxsw_sp_bridge_vlan_find(bridge_port, vid);
+ last = list_is_singular(&bridge_vlan->port_vlan_list);
- mlxsw_sp_port_fdb_flush(mlxsw_sp_port, fid);
+ list_del(&mlxsw_sp_port_vlan->bridge_vlan_node);
+ mlxsw_sp_bridge_vlan_put(bridge_vlan);
+ mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_DISABLED);
+ mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
+ if (last)
+ mlxsw_sp_bridge_port_fdb_flush(mlxsw_sp_port->mlxsw_sp,
+ bridge_port,
+ mlxsw_sp_fid_index(fid));
+ mlxsw_sp_port_vlan_fid_leave(mlxsw_sp_port_vlan);
- if (--f->ref_count == 0)
- mlxsw_sp_fid_destroy(mlxsw_sp_port->mlxsw_sp, f);
+ mlxsw_sp_bridge_port_put(mlxsw_sp_port->mlxsw_sp->bridge, bridge_port);
+ mlxsw_sp_port_vlan->bridge_port = NULL;
}
-static int mlxsw_sp_port_fid_map(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid,
- bool valid)
-{
- enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
-
- /* If port doesn't have vPorts, then it can use the global
- * VID-to-FID mapping.
- */
- if (list_empty(&mlxsw_sp_port->vports_list))
- return 0;
+static int
+mlxsw_sp_bridge_port_vlan_add(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_bridge_port *bridge_port,
+ u16 vid, bool is_untagged, bool is_pvid)
+{
+ u16 pvid = mlxsw_sp_port_pvid_determine(mlxsw_sp_port, vid, is_pvid);
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
+ struct mlxsw_sp_bridge_vlan *bridge_vlan;
+ u16 old_pvid = mlxsw_sp_port->pvid;
+ int err;
- return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, valid, fid, fid);
-}
+ mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_get(mlxsw_sp_port, vid);
+ if (IS_ERR(mlxsw_sp_port_vlan))
+ return PTR_ERR(mlxsw_sp_port_vlan);
-static int mlxsw_sp_port_fid_join(struct mlxsw_sp_port *mlxsw_sp_port,
- u16 fid_begin, u16 fid_end)
-{
- bool mc_flood;
- int fid, err;
-
- for (fid = fid_begin; fid <= fid_end; fid++) {
- err = __mlxsw_sp_port_fid_join(mlxsw_sp_port, fid);
- if (err)
- goto err_port_fid_join;
- }
+ err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true,
+ is_untagged);
+ if (err)
+ goto err_port_vlan_set;
- mc_flood = mlxsw_sp_port->mc_disabled ?
- mlxsw_sp_port->mc_flood : mlxsw_sp_port->mc_router;
+ err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, pvid);
+ if (err)
+ goto err_port_pvid_set;
- err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, fid_begin, fid_end,
- mlxsw_sp_port->uc_flood, true,
- mc_flood);
+ err = mlxsw_sp_port_vlan_bridge_join(mlxsw_sp_port_vlan, bridge_port);
if (err)
- goto err_port_flood_set;
+ goto err_port_vlan_bridge_join;
- for (fid = fid_begin; fid <= fid_end; fid++) {
- err = mlxsw_sp_port_fid_map(mlxsw_sp_port, fid, true);
- if (err)
- goto err_port_fid_map;
- }
+ bridge_vlan = mlxsw_sp_bridge_vlan_find(bridge_port, vid);
return 0;
-err_port_fid_map:
- for (fid--; fid >= fid_begin; fid--)
- mlxsw_sp_port_fid_map(mlxsw_sp_port, fid, false);
- __mlxsw_sp_port_flood_set(mlxsw_sp_port, fid_begin, fid_end, false,
- false, false);
-err_port_flood_set:
- fid = fid_end;
-err_port_fid_join:
- for (fid--; fid >= fid_begin; fid--)
- __mlxsw_sp_port_fid_leave(mlxsw_sp_port, fid);
+err_port_vlan_bridge_join:
+ mlxsw_sp_port_pvid_set(mlxsw_sp_port, old_pvid);
+err_port_pvid_set:
+ mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
+err_port_vlan_set:
+ mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
return err;
}
-static void mlxsw_sp_port_fid_leave(struct mlxsw_sp_port *mlxsw_sp_port,
- u16 fid_begin, u16 fid_end)
-{
- int fid;
-
- for (fid = fid_begin; fid <= fid_end; fid++)
- mlxsw_sp_port_fid_map(mlxsw_sp_port, fid, false);
-
- __mlxsw_sp_port_flood_set(mlxsw_sp_port, fid_begin, fid_end, false,
- false, false);
-
- for (fid = fid_begin; fid <= fid_end; fid++)
- __mlxsw_sp_port_fid_leave(mlxsw_sp_port, fid);
-}
-
-static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
- u16 vid)
-{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- char spvid_pl[MLXSW_REG_SPVID_LEN];
-
- mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
-}
-
-static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port,
- bool allow)
+static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct switchdev_trans *trans)
{
+ bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+ bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- char spaft_pl[MLXSW_REG_SPAFT_LEN];
-
- mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl);
-}
-
-int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
-{
- struct net_device *dev = mlxsw_sp_port->dev;
- int err;
-
- if (!vid) {
- err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false);
- if (err) {
- netdev_err(dev, "Failed to disallow untagged traffic\n");
- return err;
- }
- } else {
- err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid);
- if (err) {
- netdev_err(dev, "Failed to set PVID\n");
- return err;
- }
-
- /* Only allow if not already allowed. */
- if (!mlxsw_sp_port->pvid) {
- err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port,
- true);
- if (err) {
- netdev_err(dev, "Failed to allow untagged traffic\n");
- goto err_port_allow_untagged_set;
- }
- }
- }
+ struct net_device *orig_dev = vlan->obj.orig_dev;
+ struct mlxsw_sp_bridge_port *bridge_port;
+ u16 vid;
- mlxsw_sp_port->pvid = vid;
- return 0;
+ if (switchdev_trans_ph_prepare(trans))
+ return 0;
-err_port_allow_untagged_set:
- __mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid);
- return err;
-}
+ bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
+ if (WARN_ON(!bridge_port))
+ return -EINVAL;
-static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
- u16 vid_begin, u16 vid_end,
- bool learn_enable)
-{
- u16 vid, vid_e;
- int err;
+ if (!bridge_port->bridge_device->vlan_enabled)
+ return 0;
- for (vid = vid_begin; vid <= vid_end;
- vid += MLXSW_REG_SPVMLR_REC_MAX_COUNT) {
- vid_e = min((u16) (vid + MLXSW_REG_SPVMLR_REC_MAX_COUNT - 1),
- vid_end);
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
+ int err;
- err = __mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid,
- vid_e, learn_enable);
+ err = mlxsw_sp_bridge_port_vlan_add(mlxsw_sp_port, bridge_port,
+ vid, flag_untagged,
+ flag_pvid);
if (err)
return err;
}
@@ -766,102 +1047,27 @@ static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
return 0;
}
-static int __mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
- u16 vid_begin, u16 vid_end,
- bool flag_untagged, bool flag_pvid)
+static enum mlxsw_reg_sfdf_flush_type mlxsw_sp_fdb_flush_type(bool lagged)
{
- struct net_device *dev = mlxsw_sp_port->dev;
- u16 vid, old_pvid;
- int err;
-
- if (!mlxsw_sp_port->bridged)
- return -EINVAL;
-
- err = mlxsw_sp_port_fid_join(mlxsw_sp_port, vid_begin, vid_end);
- if (err) {
- netdev_err(dev, "Failed to join FIDs\n");
- return err;
- }
-
- err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid_begin, vid_end,
- true, flag_untagged);
- if (err) {
- netdev_err(dev, "Unable to add VIDs %d-%d\n", vid_begin,
- vid_end);
- goto err_port_vlans_set;
- }
-
- old_pvid = mlxsw_sp_port->pvid;
- if (flag_pvid && old_pvid != vid_begin) {
- err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid_begin);
- if (err) {
- netdev_err(dev, "Unable to add PVID %d\n", vid_begin);
- goto err_port_pvid_set;
- }
- } else if (!flag_pvid && old_pvid >= vid_begin && old_pvid <= vid_end) {
- err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, 0);
- if (err) {
- netdev_err(dev, "Unable to del PVID\n");
- goto err_port_pvid_set;
- }
- }
-
- err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid_begin, vid_end,
- mlxsw_sp_port->learning);
- if (err) {
- netdev_err(dev, "Failed to set learning for VIDs %d-%d\n",
- vid_begin, vid_end);
- goto err_port_vid_learning_set;
- }
-
- /* Changing activity bits only if HW operation succeded */
- for (vid = vid_begin; vid <= vid_end; vid++) {
- set_bit(vid, mlxsw_sp_port->active_vlans);
- if (flag_untagged)
- set_bit(vid, mlxsw_sp_port->untagged_vlans);
- else
- clear_bit(vid, mlxsw_sp_port->untagged_vlans);
- }
-
- /* STP state change must be done after we set active VLANs */
- err = mlxsw_sp_port_stp_state_set(mlxsw_sp_port,
- mlxsw_sp_port->stp_state);
- if (err) {
- netdev_err(dev, "Failed to set STP state\n");
- goto err_port_stp_state_set;
- }
-
- return 0;
-
-err_port_stp_state_set:
- for (vid = vid_begin; vid <= vid_end; vid++)
- clear_bit(vid, mlxsw_sp_port->active_vlans);
- mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid_begin, vid_end,
- false);
-err_port_vid_learning_set:
- if (old_pvid != mlxsw_sp_port->pvid)
- mlxsw_sp_port_pvid_set(mlxsw_sp_port, old_pvid);
-err_port_pvid_set:
- mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid_begin, vid_end,
- false, false);
-err_port_vlans_set:
- mlxsw_sp_port_fid_leave(mlxsw_sp_port, vid_begin, vid_end);
- return err;
+ return lagged ? MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID :
+ MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID;
}
-static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
- const struct switchdev_obj_port_vlan *vlan,
- struct switchdev_trans *trans)
+static int
+mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_bridge_port *bridge_port,
+ u16 fid_index)
{
- bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
- bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
+ bool lagged = bridge_port->lagged;
+ char sfdf_pl[MLXSW_REG_SFDF_LEN];
+ u16 system_port;
- if (switchdev_trans_ph_prepare(trans))
- return 0;
+ system_port = lagged ? bridge_port->lag_id : bridge_port->system_port;
+ mlxsw_reg_sfdf_pack(sfdf_pl, mlxsw_sp_fdb_flush_type(lagged));
+ mlxsw_reg_sfdf_fid_set(sfdf_pl, fid_index);
+ mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl, system_port);
- return __mlxsw_sp_port_vlans_add(mlxsw_sp_port,
- vlan->vid_begin, vlan->vid_end,
- flag_untagged, flag_pvid);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
}
static enum mlxsw_reg_sfd_rec_policy mlxsw_sp_sfd_rec_policy(bool dynamic)
@@ -935,29 +1141,40 @@ static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
}
static int
-mlxsw_sp_port_fdb_static_add(struct mlxsw_sp_port *mlxsw_sp_port,
- const struct switchdev_obj_port_fdb *fdb,
- struct switchdev_trans *trans)
+mlxsw_sp_port_fdb_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct switchdev_notifier_fdb_info *fdb_info, bool adding)
{
- u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, fdb->vid);
- u16 lag_vid = 0;
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct net_device *orig_dev = fdb_info->info.dev;
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
+ struct mlxsw_sp_bridge_device *bridge_device;
+ struct mlxsw_sp_bridge_port *bridge_port;
+ u16 fid_index, vid;
+
+ bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
+ if (!bridge_port)
+ return -EINVAL;
- if (switchdev_trans_ph_prepare(trans))
+ bridge_device = bridge_port->bridge_device;
+ mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
+ bridge_device,
+ fdb_info->vid);
+ if (!mlxsw_sp_port_vlan)
return 0;
- if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
- lag_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
- }
+ fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
+ vid = mlxsw_sp_port_vlan->vid;
- if (!mlxsw_sp_port->lagged)
- return mlxsw_sp_port_fdb_uc_op(mlxsw_sp_port->mlxsw_sp,
- mlxsw_sp_port->local_port,
- fdb->addr, fid, true, false);
+ if (!bridge_port->lagged)
+ return mlxsw_sp_port_fdb_uc_op(mlxsw_sp,
+ bridge_port->system_port,
+ fdb_info->addr, fid_index,
+ adding, false);
else
- return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp_port->mlxsw_sp,
- mlxsw_sp_port->lag_id,
- fdb->addr, fid, lag_vid,
- true, false);
+ return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp,
+ bridge_port->lag_id,
+ fdb_info->addr, fid_index,
+ vid, adding, false);
}
static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr,
@@ -1006,7 +1223,7 @@ static struct mlxsw_sp_mid *__mlxsw_sp_mc_get(struct mlxsw_sp *mlxsw_sp,
{
struct mlxsw_sp_mid *mid;
- list_for_each_entry(mid, &mlxsw_sp->br_mids.list, list) {
+ list_for_each_entry(mid, &mlxsw_sp->bridge->mids_list, list) {
if (ether_addr_equal(mid->addr, addr) && mid->fid == fid)
return mid;
}
@@ -1020,7 +1237,7 @@ static struct mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_mid *mid;
u16 mid_idx;
- mid_idx = find_first_zero_bit(mlxsw_sp->br_mids.mapped,
+ mid_idx = find_first_zero_bit(mlxsw_sp->bridge->mids_bitmap,
MLXSW_SP_MID_MAX);
if (mid_idx == MLXSW_SP_MID_MAX)
return NULL;
@@ -1029,12 +1246,12 @@ static struct mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp,
if (!mid)
return NULL;
- set_bit(mid_idx, mlxsw_sp->br_mids.mapped);
+ set_bit(mid_idx, mlxsw_sp->bridge->mids_bitmap);
ether_addr_copy(mid->addr, addr);
mid->fid = fid;
mid->mid = mid_idx;
mid->ref_count = 0;
- list_add_tail(&mid->list, &mlxsw_sp->br_mids.list);
+ list_add_tail(&mid->list, &mlxsw_sp->bridge->mids_list);
return mid;
}
@@ -1044,7 +1261,7 @@ static int __mlxsw_sp_mc_dec_ref(struct mlxsw_sp *mlxsw_sp,
{
if (--mid->ref_count == 0) {
list_del(&mid->list);
- clear_bit(mid->mid, mlxsw_sp->br_mids.mapped);
+ clear_bit(mid->mid, mlxsw_sp->bridge->mids_bitmap);
kfree(mid);
return 1;
}
@@ -1056,17 +1273,34 @@ static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
struct switchdev_trans *trans)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct net_device *orig_dev = mdb->obj.orig_dev;
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
struct net_device *dev = mlxsw_sp_port->dev;
+ struct mlxsw_sp_bridge_device *bridge_device;
+ struct mlxsw_sp_bridge_port *bridge_port;
struct mlxsw_sp_mid *mid;
- u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, mdb->vid);
+ u16 fid_index;
int err = 0;
if (switchdev_trans_ph_prepare(trans))
return 0;
- mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, fid);
+ bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
+ if (WARN_ON(!bridge_port))
+ return -EINVAL;
+
+ bridge_device = bridge_port->bridge_device;
+ mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
+ bridge_device,
+ mdb->vid);
+ if (WARN_ON(!mlxsw_sp_port_vlan))
+ return -EINVAL;
+
+ fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
+
+ mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, fid_index);
if (!mid) {
- mid = __mlxsw_sp_mc_alloc(mlxsw_sp, mdb->addr, fid);
+ mid = __mlxsw_sp_mc_alloc(mlxsw_sp, mdb->addr, fid_index);
if (!mid) {
netdev_err(dev, "Unable to allocate MC group\n");
return -ENOMEM;
@@ -1082,8 +1316,8 @@ static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
}
if (mid->ref_count == 1) {
- err = mlxsw_sp_port_mdb_op(mlxsw_sp, mdb->addr, fid, mid->mid,
- true);
+ err = mlxsw_sp_port_mdb_op(mlxsw_sp, mdb->addr, fid_index,
+ mid->mid, true);
if (err) {
netdev_err(dev, "Unable to set MC SFD\n");
goto err_out;
@@ -1104,24 +1338,12 @@ static int mlxsw_sp_port_obj_add(struct net_device *dev,
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
int err = 0;
- mlxsw_sp_port = mlxsw_sp_port_orig_get(obj->orig_dev, mlxsw_sp_port);
- if (!mlxsw_sp_port)
- return -EINVAL;
-
switch (obj->id) {
case SWITCHDEV_OBJ_ID_PORT_VLAN:
- if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
- return 0;
-
err = mlxsw_sp_port_vlans_add(mlxsw_sp_port,
SWITCHDEV_OBJ_PORT_VLAN(obj),
trans);
break;
- case SWITCHDEV_OBJ_ID_PORT_FDB:
- err = mlxsw_sp_port_fdb_static_add(mlxsw_sp_port,
- SWITCHDEV_OBJ_PORT_FDB(obj),
- trans);
- break;
case SWITCHDEV_OBJ_ID_PORT_MDB:
err = mlxsw_sp_port_mdb_add(mlxsw_sp_port,
SWITCHDEV_OBJ_PORT_MDB(obj),
@@ -1135,82 +1357,72 @@ static int mlxsw_sp_port_obj_add(struct net_device *dev,
return err;
}
-static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
- u16 vid_begin, u16 vid_end)
+static void
+mlxsw_sp_bridge_port_vlan_del(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
{
- u16 vid, pvid;
-
- if (!mlxsw_sp_port->bridged)
- return -EINVAL;
-
- mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid_begin, vid_end,
- false);
-
- pvid = mlxsw_sp_port->pvid;
- if (pvid >= vid_begin && pvid <= vid_end)
- mlxsw_sp_port_pvid_set(mlxsw_sp_port, 0);
-
- mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid_begin, vid_end,
- false, false);
+ u16 pvid = mlxsw_sp_port->pvid == vid ? 0 : vid;
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
- mlxsw_sp_port_fid_leave(mlxsw_sp_port, vid_begin, vid_end);
-
- /* Changing activity bits only if HW operation succeded */
- for (vid = vid_begin; vid <= vid_end; vid++)
- clear_bit(vid, mlxsw_sp_port->active_vlans);
+ mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
+ if (WARN_ON(!mlxsw_sp_port_vlan))
+ return;
- return 0;
+ mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
+ mlxsw_sp_port_pvid_set(mlxsw_sp_port, pvid);
+ mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
+ mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
}
static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
const struct switchdev_obj_port_vlan *vlan)
{
- return __mlxsw_sp_port_vlans_del(mlxsw_sp_port, vlan->vid_begin,
- vlan->vid_end);
-}
-
-void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port)
-{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct net_device *orig_dev = vlan->obj.orig_dev;
+ struct mlxsw_sp_bridge_port *bridge_port;
u16 vid;
- for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID)
- __mlxsw_sp_port_vlans_del(mlxsw_sp_port, vid, vid);
-}
+ bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
+ if (WARN_ON(!bridge_port))
+ return -EINVAL;
-static int
-mlxsw_sp_port_fdb_static_del(struct mlxsw_sp_port *mlxsw_sp_port,
- const struct switchdev_obj_port_fdb *fdb)
-{
- u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, fdb->vid);
- u16 lag_vid = 0;
+ if (!bridge_port->bridge_device->vlan_enabled)
+ return 0;
- if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
- lag_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
- }
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++)
+ mlxsw_sp_bridge_port_vlan_del(mlxsw_sp_port, bridge_port, vid);
- if (!mlxsw_sp_port->lagged)
- return mlxsw_sp_port_fdb_uc_op(mlxsw_sp_port->mlxsw_sp,
- mlxsw_sp_port->local_port,
- fdb->addr, fid,
- false, false);
- else
- return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp_port->mlxsw_sp,
- mlxsw_sp_port->lag_id,
- fdb->addr, fid, lag_vid,
- false, false);
+ return 0;
}
static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
const struct switchdev_obj_port_mdb *mdb)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct net_device *orig_dev = mdb->obj.orig_dev;
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
+ struct mlxsw_sp_bridge_device *bridge_device;
struct net_device *dev = mlxsw_sp_port->dev;
+ struct mlxsw_sp_bridge_port *bridge_port;
struct mlxsw_sp_mid *mid;
- u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, mdb->vid);
+ u16 fid_index;
u16 mid_idx;
int err = 0;
- mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, fid);
+ bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
+ if (WARN_ON(!bridge_port))
+ return -EINVAL;
+
+ bridge_device = bridge_port->bridge_device;
+ mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
+ bridge_device,
+ mdb->vid);
+ if (WARN_ON(!mlxsw_sp_port_vlan))
+ return -EINVAL;
+
+ fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
+
+ mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, fid_index);
if (!mid) {
netdev_err(dev, "Unable to remove port from MC DB\n");
return -EINVAL;
@@ -1222,8 +1434,8 @@ static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
mid_idx = mid->mid;
if (__mlxsw_sp_mc_dec_ref(mlxsw_sp, mid)) {
- err = mlxsw_sp_port_mdb_op(mlxsw_sp, mdb->addr, fid, mid_idx,
- false);
+ err = mlxsw_sp_port_mdb_op(mlxsw_sp, mdb->addr, fid_index,
+ mid_idx, false);
if (err)
netdev_err(dev, "Unable to remove MC SFD\n");
}
@@ -1237,22 +1449,11 @@ static int mlxsw_sp_port_obj_del(struct net_device *dev,
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
int err = 0;
- mlxsw_sp_port = mlxsw_sp_port_orig_get(obj->orig_dev, mlxsw_sp_port);
- if (!mlxsw_sp_port)
- return -EINVAL;
-
switch (obj->id) {
case SWITCHDEV_OBJ_ID_PORT_VLAN:
- if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
- return 0;
-
err = mlxsw_sp_port_vlans_del(mlxsw_sp_port,
SWITCHDEV_OBJ_PORT_VLAN(obj));
break;
- case SWITCHDEV_OBJ_ID_PORT_FDB:
- err = mlxsw_sp_port_fdb_static_del(mlxsw_sp_port,
- SWITCHDEV_OBJ_PORT_FDB(obj));
- break;
case SWITCHDEV_OBJ_ID_PORT_MDB:
err = mlxsw_sp_port_mdb_del(mlxsw_sp_port,
SWITCHDEV_OBJ_PORT_MDB(obj));
@@ -1282,188 +1483,200 @@ static struct mlxsw_sp_port *mlxsw_sp_lag_rep_port(struct mlxsw_sp *mlxsw_sp,
return NULL;
}
-static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port,
- struct switchdev_obj_port_fdb *fdb,
- switchdev_obj_dump_cb_t *cb,
- struct net_device *orig_dev)
+static const struct switchdev_ops mlxsw_sp_port_switchdev_ops = {
+ .switchdev_port_attr_get = mlxsw_sp_port_attr_get,
+ .switchdev_port_attr_set = mlxsw_sp_port_attr_set,
+ .switchdev_port_obj_add = mlxsw_sp_port_obj_add,
+ .switchdev_port_obj_del = mlxsw_sp_port_obj_del,
+};
+
+static int
+mlxsw_sp_bridge_8021q_port_join(struct mlxsw_sp_bridge_device *bridge_device,
+ struct mlxsw_sp_bridge_port *bridge_port,
+ struct mlxsw_sp_port *mlxsw_sp_port)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- struct mlxsw_sp_port *tmp;
- struct mlxsw_sp_fid *f;
- u16 vport_fid;
- char *sfd_pl;
- char mac[ETH_ALEN];
- u16 fid;
- u8 local_port;
- u16 lag_id;
- u8 num_rec;
- int stored_err = 0;
- int i;
- int err;
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
- sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
- if (!sfd_pl)
- return -ENOMEM;
+ if (is_vlan_dev(bridge_port->dev))
+ return -EINVAL;
- f = mlxsw_sp_vport_fid_get(mlxsw_sp_port);
- vport_fid = f ? f->fid : 0;
+ mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, 1);
+ if (WARN_ON(!mlxsw_sp_port_vlan))
+ return -EINVAL;
- mlxsw_reg_sfd_pack(sfd_pl, MLXSW_REG_SFD_OP_QUERY_DUMP, 0);
- do {
- mlxsw_reg_sfd_num_rec_set(sfd_pl, MLXSW_REG_SFD_REC_MAX_COUNT);
- err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
- if (err)
- goto out;
+ /* Let VLAN-aware bridge take care of its own VLANs */
+ mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
- num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
+ return 0;
+}
- /* Even in case of error, we have to run the dump to the end
- * so the session in firmware is finished.
- */
- if (stored_err)
- continue;
+static void
+mlxsw_sp_bridge_8021q_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
+ struct mlxsw_sp_bridge_port *bridge_port,
+ struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ mlxsw_sp_port_vlan_get(mlxsw_sp_port, 1);
+ /* Make sure untagged frames are allowed to ingress */
+ mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);
+}
- for (i = 0; i < num_rec; i++) {
- switch (mlxsw_reg_sfd_rec_type_get(sfd_pl, i)) {
- case MLXSW_REG_SFD_REC_TYPE_UNICAST:
- mlxsw_reg_sfd_uc_unpack(sfd_pl, i, mac, &fid,
- &local_port);
- if (local_port == mlxsw_sp_port->local_port) {
- if (vport_fid && vport_fid == fid)
- fdb->vid = 0;
- else if (!vport_fid &&
- !mlxsw_sp_fid_is_vfid(fid))
- fdb->vid = fid;
- else
- continue;
- ether_addr_copy(fdb->addr, mac);
- fdb->ndm_state = NUD_REACHABLE;
- err = cb(&fdb->obj);
- if (err)
- stored_err = err;
- }
- break;
- case MLXSW_REG_SFD_REC_TYPE_UNICAST_LAG:
- mlxsw_reg_sfd_uc_lag_unpack(sfd_pl, i,
- mac, &fid, &lag_id);
- tmp = mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id);
- if (tmp && tmp->local_port ==
- mlxsw_sp_port->local_port) {
- /* LAG records can only point to LAG
- * devices or VLAN devices on top.
- */
- if (!netif_is_lag_master(orig_dev) &&
- !is_vlan_dev(orig_dev))
- continue;
- if (vport_fid && vport_fid == fid)
- fdb->vid = 0;
- else if (!vport_fid &&
- !mlxsw_sp_fid_is_vfid(fid))
- fdb->vid = fid;
- else
- continue;
- ether_addr_copy(fdb->addr, mac);
- fdb->ndm_state = NUD_REACHABLE;
- err = cb(&fdb->obj);
- if (err)
- stored_err = err;
- }
- break;
- }
- }
- } while (num_rec == MLXSW_REG_SFD_REC_MAX_COUNT);
+static struct mlxsw_sp_fid *
+mlxsw_sp_bridge_8021q_fid_get(struct mlxsw_sp_bridge_device *bridge_device,
+ u16 vid)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
-out:
- kfree(sfd_pl);
- return stored_err ? stored_err : err;
+ return mlxsw_sp_fid_8021q_get(mlxsw_sp, vid);
}
-static int mlxsw_sp_port_vlan_dump(struct mlxsw_sp_port *mlxsw_sp_port,
- struct switchdev_obj_port_vlan *vlan,
- switchdev_obj_dump_cb_t *cb)
+static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021q_ops = {
+ .port_join = mlxsw_sp_bridge_8021q_port_join,
+ .port_leave = mlxsw_sp_bridge_8021q_port_leave,
+ .fid_get = mlxsw_sp_bridge_8021q_fid_get,
+};
+
+static bool
+mlxsw_sp_port_is_br_member(const struct mlxsw_sp_port *mlxsw_sp_port,
+ const struct net_device *br_dev)
{
- u16 vid;
- int err = 0;
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
- if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
- vlan->flags = 0;
- vlan->vid_begin = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
- vlan->vid_end = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
- return cb(&vlan->obj);
+ list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
+ list) {
+ if (mlxsw_sp_port_vlan->bridge_port &&
+ mlxsw_sp_port_vlan->bridge_port->bridge_device->dev ==
+ br_dev)
+ return true;
}
- for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
- vlan->flags = 0;
- if (vid == mlxsw_sp_port->pvid)
- vlan->flags |= BRIDGE_VLAN_INFO_PVID;
- if (test_bit(vid, mlxsw_sp_port->untagged_vlans))
- vlan->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
- vlan->vid_begin = vid;
- vlan->vid_end = vid;
- err = cb(&vlan->obj);
- if (err)
- break;
- }
- return err;
+ return false;
}
-static int mlxsw_sp_port_obj_dump(struct net_device *dev,
- struct switchdev_obj *obj,
- switchdev_obj_dump_cb_t *cb)
+static int
+mlxsw_sp_bridge_8021d_port_join(struct mlxsw_sp_bridge_device *bridge_device,
+ struct mlxsw_sp_bridge_port *bridge_port,
+ struct mlxsw_sp_port *mlxsw_sp_port)
{
- struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
- int err = 0;
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
+ u16 vid;
- mlxsw_sp_port = mlxsw_sp_port_orig_get(obj->orig_dev, mlxsw_sp_port);
- if (!mlxsw_sp_port)
+ if (!is_vlan_dev(bridge_port->dev))
return -EINVAL;
+ vid = vlan_dev_vlan_id(bridge_port->dev);
- switch (obj->id) {
- case SWITCHDEV_OBJ_ID_PORT_VLAN:
- err = mlxsw_sp_port_vlan_dump(mlxsw_sp_port,
- SWITCHDEV_OBJ_PORT_VLAN(obj), cb);
- break;
- case SWITCHDEV_OBJ_ID_PORT_FDB:
- err = mlxsw_sp_port_fdb_dump(mlxsw_sp_port,
- SWITCHDEV_OBJ_PORT_FDB(obj), cb,
- obj->orig_dev);
- break;
- default:
- err = -EOPNOTSUPP;
- break;
+ mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
+ if (WARN_ON(!mlxsw_sp_port_vlan))
+ return -EINVAL;
+
+ if (mlxsw_sp_port_is_br_member(mlxsw_sp_port, bridge_device->dev)) {
+ netdev_err(mlxsw_sp_port->dev, "Can't bridge VLAN uppers of the same port\n");
+ return -EINVAL;
}
- return err;
+ /* Port is no longer usable as a router interface */
+ if (mlxsw_sp_port_vlan->fid)
+ mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
+
+ return mlxsw_sp_port_vlan_bridge_join(mlxsw_sp_port_vlan, bridge_port);
}
-static const struct switchdev_ops mlxsw_sp_port_switchdev_ops = {
- .switchdev_port_attr_get = mlxsw_sp_port_attr_get,
- .switchdev_port_attr_set = mlxsw_sp_port_attr_set,
- .switchdev_port_obj_add = mlxsw_sp_port_obj_add,
- .switchdev_port_obj_del = mlxsw_sp_port_obj_del,
- .switchdev_port_obj_dump = mlxsw_sp_port_obj_dump,
+static void
+mlxsw_sp_bridge_8021d_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
+ struct mlxsw_sp_bridge_port *bridge_port,
+ struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
+ u16 vid = vlan_dev_vlan_id(bridge_port->dev);
+
+ mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
+ if (WARN_ON(!mlxsw_sp_port_vlan))
+ return;
+
+ mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
+}
+
+static struct mlxsw_sp_fid *
+mlxsw_sp_bridge_8021d_fid_get(struct mlxsw_sp_bridge_device *bridge_device,
+ u16 vid)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
+
+ return mlxsw_sp_fid_8021d_get(mlxsw_sp, bridge_device->dev->ifindex);
+}
+
+static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021d_ops = {
+ .port_join = mlxsw_sp_bridge_8021d_port_join,
+ .port_leave = mlxsw_sp_bridge_8021d_port_leave,
+ .fid_get = mlxsw_sp_bridge_8021d_fid_get,
};
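/*
 * Editor's note: a hedged sketch of how the two ops tables above could be
 * selected when a bridge device is instantiated; the helper below is
 * illustrative, not the driver's actual code. Both ops pointers are
 * assigned in mlxsw_sp_switchdev_init() further down.
 */
static const struct mlxsw_sp_bridge_ops *
mlxsw_sp_bridge_ops_select(const struct mlxsw_sp_bridge *bridge,
			   bool vlan_enabled)
{
	/* VLAN-aware (802.1Q) bridges map each VID to its own FID, while
	 * VLAN-unaware (802.1D) bridges use a single FID per bridge.
	 */
	return vlan_enabled ? bridge->bridge_8021q_ops :
			      bridge->bridge_8021d_ops;
}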
-static void mlxsw_sp_fdb_call_notifiers(bool learning_sync, bool adding,
- char *mac, u16 vid,
- struct net_device *dev)
+int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct net_device *brport_dev,
+ struct net_device *br_dev)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_bridge_device *bridge_device;
+ struct mlxsw_sp_bridge_port *bridge_port;
+ int err;
+
+ bridge_port = mlxsw_sp_bridge_port_get(mlxsw_sp->bridge, brport_dev);
+ if (IS_ERR(bridge_port))
+ return PTR_ERR(bridge_port);
+ bridge_device = bridge_port->bridge_device;
+
+ err = bridge_device->ops->port_join(bridge_device, bridge_port,
+ mlxsw_sp_port);
+ if (err)
+ goto err_port_join;
+
+ return 0;
+
+err_port_join:
+ mlxsw_sp_bridge_port_put(mlxsw_sp->bridge, bridge_port);
+ return err;
+}
+
+void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct net_device *brport_dev,
+ struct net_device *br_dev)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_bridge_device *bridge_device;
+ struct mlxsw_sp_bridge_port *bridge_port;
+
+ bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
+ if (!bridge_device)
+ return;
+ bridge_port = __mlxsw_sp_bridge_port_find(bridge_device, brport_dev);
+ if (!bridge_port)
+ return;
+
+ bridge_device->ops->port_leave(bridge_device, bridge_port,
+ mlxsw_sp_port);
+ mlxsw_sp_bridge_port_put(mlxsw_sp->bridge, bridge_port);
+}
+
+static void
+mlxsw_sp_fdb_call_notifiers(enum switchdev_notifier_type type,
+ const char *mac, u16 vid,
+ struct net_device *dev)
{
struct switchdev_notifier_fdb_info info;
- unsigned long notifier_type;
- if (learning_sync) {
- info.addr = mac;
- info.vid = vid;
- notifier_type = adding ? SWITCHDEV_FDB_ADD : SWITCHDEV_FDB_DEL;
- call_switchdev_notifiers(notifier_type, dev, &info.info);
- }
+ info.addr = mac;
+ info.vid = vid;
+ call_switchdev_notifiers(type, dev, &info.info);
}
static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
char *sfn_pl, int rec_index,
bool adding)
{
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
+ struct mlxsw_sp_bridge_device *bridge_device;
+ struct mlxsw_sp_bridge_port *bridge_port;
struct mlxsw_sp_port *mlxsw_sp_port;
+ enum switchdev_notifier_type type;
char mac[ETH_ALEN];
u8 local_port;
u16 vid, fid;
@@ -1477,22 +1690,21 @@ static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
goto just_remove;
}
- if (mlxsw_sp_fid_is_vfid(fid)) {
- struct mlxsw_sp_port *mlxsw_sp_vport;
+ mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_fid(mlxsw_sp_port, fid);
+ if (!mlxsw_sp_port_vlan) {
+ netdev_err(mlxsw_sp_port->dev, "Failed to find a matching {Port, VID} following FDB notification\n");
+ goto just_remove;
+ }
- mlxsw_sp_vport = mlxsw_sp_port_vport_find_by_fid(mlxsw_sp_port,
- fid);
- if (!mlxsw_sp_vport) {
- netdev_err(mlxsw_sp_port->dev, "Failed to find a matching vPort following FDB notification\n");
- goto just_remove;
- }
- vid = 0;
- /* Override the physical port with the vPort. */
- mlxsw_sp_port = mlxsw_sp_vport;
- } else {
- vid = fid;
+ bridge_port = mlxsw_sp_port_vlan->bridge_port;
+ if (!bridge_port) {
+ netdev_err(mlxsw_sp_port->dev, "{Port, VID} not associated with a bridge\n");
+ goto just_remove;
}
+ bridge_device = bridge_port->bridge_device;
+ vid = bridge_device->vlan_enabled ? mlxsw_sp_port_vlan->vid : 0;
+
do_fdb_op:
err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid,
adding, true);
@@ -1503,8 +1715,9 @@ do_fdb_op:
if (!do_notification)
return;
- mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning_sync,
- adding, mac, vid, mlxsw_sp_port->dev);
+ type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE : SWITCHDEV_FDB_DEL_TO_BRIDGE;
+ mlxsw_sp_fdb_call_notifiers(type, mac, vid, bridge_port->dev);
+
return;
just_remove:
@@ -1517,8 +1730,11 @@ static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
char *sfn_pl, int rec_index,
bool adding)
{
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
+ struct mlxsw_sp_bridge_device *bridge_device;
+ struct mlxsw_sp_bridge_port *bridge_port;
struct mlxsw_sp_port *mlxsw_sp_port;
- struct net_device *dev;
+ enum switchdev_notifier_type type;
char mac[ETH_ALEN];
u16 lag_vid = 0;
u16 lag_id;
@@ -1533,26 +1749,22 @@ static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
goto just_remove;
}
- if (mlxsw_sp_fid_is_vfid(fid)) {
- struct mlxsw_sp_port *mlxsw_sp_vport;
-
- mlxsw_sp_vport = mlxsw_sp_port_vport_find_by_fid(mlxsw_sp_port,
- fid);
- if (!mlxsw_sp_vport) {
- netdev_err(mlxsw_sp_port->dev, "Failed to find a matching vPort following FDB notification\n");
- goto just_remove;
- }
+ mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_fid(mlxsw_sp_port, fid);
+ if (!mlxsw_sp_port_vlan) {
+ netdev_err(mlxsw_sp_port->dev, "Failed to find a matching {Port, VID} following FDB notification\n");
+ goto just_remove;
+ }
- lag_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
- dev = mlxsw_sp_vport->dev;
- vid = 0;
- /* Override the physical port with the vPort. */
- mlxsw_sp_port = mlxsw_sp_vport;
- } else {
- dev = mlxsw_sp_lag_get(mlxsw_sp, lag_id)->dev;
- vid = fid;
+ bridge_port = mlxsw_sp_port_vlan->bridge_port;
+ if (!bridge_port) {
+ netdev_err(mlxsw_sp_port->dev, "{Port, VID} not associated with a bridge\n");
+ goto just_remove;
}
+ bridge_device = bridge_port->bridge_device;
+ vid = bridge_device->vlan_enabled ? mlxsw_sp_port_vlan->vid : 0;
+ lag_vid = mlxsw_sp_port_vlan->vid;
+
do_fdb_op:
err = mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, lag_id, mac, fid, lag_vid,
adding, true);
@@ -1563,8 +1775,9 @@ do_fdb_op:
if (!do_notification)
return;
- mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning_sync, adding, mac,
- vid, dev);
+ type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE : SWITCHDEV_FDB_DEL_TO_BRIDGE;
+ mlxsw_sp_fdb_call_notifiers(type, mac, vid, bridge_port->dev);
+
return;
just_remove:
@@ -1598,12 +1811,15 @@ static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp,
static void mlxsw_sp_fdb_notify_work_schedule(struct mlxsw_sp *mlxsw_sp)
{
- mlxsw_core_schedule_dw(&mlxsw_sp->fdb_notify.dw,
- msecs_to_jiffies(mlxsw_sp->fdb_notify.interval));
+ struct mlxsw_sp_bridge *bridge = mlxsw_sp->bridge;
+
+ mlxsw_core_schedule_dw(&bridge->fdb_notify.dw,
+ msecs_to_jiffies(bridge->fdb_notify.interval));
}
static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
{
+ struct mlxsw_sp_bridge *bridge;
struct mlxsw_sp *mlxsw_sp;
char *sfn_pl;
u8 num_rec;
@@ -1614,7 +1830,8 @@ static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
if (!sfn_pl)
return;
- mlxsw_sp = container_of(work, struct mlxsw_sp, fdb_notify.dw.work);
+ bridge = container_of(work, struct mlxsw_sp_bridge, fdb_notify.dw.work);
+ mlxsw_sp = bridge->mlxsw_sp;
rtnl_lock();
mlxsw_reg_sfn_pack(sfn_pl);
@@ -1633,8 +1850,100 @@ out:
mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
}
+struct mlxsw_sp_switchdev_event_work {
+ struct work_struct work;
+ struct switchdev_notifier_fdb_info fdb_info;
+ struct net_device *dev;
+ unsigned long event;
+};
+
+static void mlxsw_sp_switchdev_event_work(struct work_struct *work)
+{
+ struct mlxsw_sp_switchdev_event_work *switchdev_work =
+ container_of(work, struct mlxsw_sp_switchdev_event_work, work);
+ struct net_device *dev = switchdev_work->dev;
+ struct switchdev_notifier_fdb_info *fdb_info;
+ struct mlxsw_sp_port *mlxsw_sp_port;
+ int err;
+
+ rtnl_lock();
+ mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
+ if (!mlxsw_sp_port)
+ goto out;
+
+ switch (switchdev_work->event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ fdb_info = &switchdev_work->fdb_info;
+ err = mlxsw_sp_port_fdb_set(mlxsw_sp_port, fdb_info, true);
+ if (err)
+ break;
+ mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
+ fdb_info->addr,
+ fdb_info->vid, dev);
+ break;
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ fdb_info = &switchdev_work->fdb_info;
+ mlxsw_sp_port_fdb_set(mlxsw_sp_port, fdb_info, false);
+ break;
+ }
+
+out:
+ rtnl_unlock();
+ kfree(switchdev_work->fdb_info.addr);
+ kfree(switchdev_work);
+ dev_put(dev);
+}
+
+/* Called under rcu_read_lock() */
+static int mlxsw_sp_switchdev_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+ struct mlxsw_sp_switchdev_event_work *switchdev_work;
+ struct switchdev_notifier_fdb_info *fdb_info = ptr;
+
+ if (!mlxsw_sp_port_dev_lower_find_rcu(dev))
+ return NOTIFY_DONE;
+
+ switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
+ if (!switchdev_work)
+ return NOTIFY_BAD;
+
+ INIT_WORK(&switchdev_work->work, mlxsw_sp_switchdev_event_work);
+ switchdev_work->dev = dev;
+ switchdev_work->event = event;
+
+ switch (event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE: /* fall through */
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ memcpy(&switchdev_work->fdb_info, ptr,
+ sizeof(switchdev_work->fdb_info));
+ switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
+ ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
+ fdb_info->addr);
+ /* Take a reference on the device. This can be either an
+ * upper device containing the mlxsw_sp_port or the
+ * mlxsw_sp_port itself.
+ */
+ dev_hold(dev);
+ break;
+ default:
+ kfree(switchdev_work);
+ return NOTIFY_DONE;
+ }
+
+ mlxsw_core_schedule_work(&switchdev_work->work);
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block mlxsw_sp_switchdev_notifier = {
+ .notifier_call = mlxsw_sp_switchdev_event,
+};
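/*
 * Editor's note: the notifier above runs under rcu_read_lock(), so it only
 * copies the FDB record with GFP_ATOMIC and defers the register writes to
 * process context. A minimal sketch of that pattern with hypothetical
 * foo_*() names:
 */
#include <linux/notifier.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct foo_event_work {
	struct work_struct work;
};

static void foo_event_work_fn(struct work_struct *work)
{
	struct foo_event_work *w =
		container_of(work, struct foo_event_work, work);

	/* Process context: sleeping and device I/O are allowed here. */
	kfree(w);
}

static int foo_event(struct notifier_block *nb, unsigned long event,
		     void *ptr)
{
	struct foo_event_work *w;

	w = kzalloc(sizeof(*w), GFP_ATOMIC);	/* atomic context, no sleep */
	if (!w)
		return NOTIFY_BAD;

	INIT_WORK(&w->work, foo_event_work_fn);
	schedule_work(&w->work);

	return NOTIFY_DONE;
}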
+
static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp)
{
+ struct mlxsw_sp_bridge *bridge = mlxsw_sp->bridge;
int err;
err = mlxsw_sp_ageing_set(mlxsw_sp, MLXSW_SP_DEFAULT_AGEING_TIME);
@@ -1642,25 +1951,51 @@ static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp)
dev_err(mlxsw_sp->bus_info->dev, "Failed to set default ageing time\n");
return err;
}
- INIT_DELAYED_WORK(&mlxsw_sp->fdb_notify.dw, mlxsw_sp_fdb_notify_work);
- mlxsw_sp->fdb_notify.interval = MLXSW_SP_DEFAULT_LEARNING_INTERVAL;
+
+ err = register_switchdev_notifier(&mlxsw_sp_switchdev_notifier);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to register switchdev notifier\n");
+ return err;
+ }
+
+ INIT_DELAYED_WORK(&bridge->fdb_notify.dw, mlxsw_sp_fdb_notify_work);
+ bridge->fdb_notify.interval = MLXSW_SP_DEFAULT_LEARNING_INTERVAL;
mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
return 0;
}
static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp)
{
- cancel_delayed_work_sync(&mlxsw_sp->fdb_notify.dw);
+ cancel_delayed_work_sync(&mlxsw_sp->bridge->fdb_notify.dw);
+ unregister_switchdev_notifier(&mlxsw_sp_switchdev_notifier);
}
int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
{
+ struct mlxsw_sp_bridge *bridge;
+
+ bridge = kzalloc(sizeof(*mlxsw_sp->bridge), GFP_KERNEL);
+ if (!bridge)
+ return -ENOMEM;
+ mlxsw_sp->bridge = bridge;
+ bridge->mlxsw_sp = mlxsw_sp;
+
+ INIT_LIST_HEAD(&mlxsw_sp->bridge->bridges_list);
+ INIT_LIST_HEAD(&mlxsw_sp->bridge->mids_list);
+
+ bridge->bridge_8021q_ops = &mlxsw_sp_bridge_8021q_ops;
+ bridge->bridge_8021d_ops = &mlxsw_sp_bridge_8021d_ops;
+
return mlxsw_sp_fdb_init(mlxsw_sp);
}
void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp)
{
mlxsw_sp_fdb_fini(mlxsw_sp);
+ WARN_ON(!list_empty(&mlxsw_sp->bridge->mids_list));
+ WARN_ON(!list_empty(&mlxsw_sp->bridge->bridges_list));
+ kfree(mlxsw_sp->bridge);
}
void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h
index e008fdbed20f..12b5ed58f3eb 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/trap.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h
@@ -66,6 +66,7 @@ enum {
MLXSW_TRAP_ID_RTR_INGRESS0 = 0x70,
MLXSW_TRAP_ID_BGP_IPV4 = 0x88,
MLXSW_TRAP_ID_HOST_MISS_IPV4 = 0x90,
+ MLXSW_TRAP_ID_ACL0 = 0x1C0,
MLXSW_TRAP_ID_MAX = 0x1FF
};
diff --git a/drivers/net/ethernet/micrel/ks8842.c b/drivers/net/ethernet/micrel/ks8842.c
index cb0102dd7f70..e3d7c74d47bb 100644
--- a/drivers/net/ethernet/micrel/ks8842.c
+++ b/drivers/net/ethernet/micrel/ks8842.c
@@ -669,7 +669,7 @@ static void ks8842_rx_frame(struct net_device *netdev,
ks8842_update_rx_counters(netdev, status, len);
if (adapter->conf_flags & KS884X_16BIT) {
- u16 *data16 = (u16 *)skb_put(skb, len);
+ u16 *data16 = skb_put(skb, len);
ks8842_select_bank(adapter, 17);
while (len > 0) {
*data16++ = ioread16(adapter->hw_addr +
@@ -679,7 +679,7 @@ static void ks8842_rx_frame(struct net_device *netdev,
len -= sizeof(u32);
}
} else {
- u32 *data = (u32 *)skb_put(skb, len);
+ u32 *data = skb_put(skb, len);
ks8842_select_bank(adapter, 17);
while (len > 0) {
diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c
index 20358f87de57..2fe96f1f3fe5 100644
--- a/drivers/net/ethernet/micrel/ks8851.c
+++ b/drivers/net/ethernet/micrel/ks8851.c
@@ -1071,7 +1071,10 @@ static int ks8851_get_link_ksettings(struct net_device *dev,
struct ethtool_link_ksettings *cmd)
{
struct ks8851_net *ks = netdev_priv(dev);
- return mii_ethtool_get_link_ksettings(&ks->mii, cmd);
+
+ mii_ethtool_get_link_ksettings(&ks->mii, cmd);
+
+ return 0;
}
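/*
 * Editor's note: mii_ethtool_get_link_ksettings() was converted to return
 * void, as it cannot fail, so callers now return 0 themselves; the same
 * conversion is repeated for ks8851_mll below.
 */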
static int ks8851_set_link_ksettings(struct net_device *dev,
diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c
index 7647f7bdbcb8..f3e9dd47b56f 100644
--- a/drivers/net/ethernet/micrel/ks8851_mll.c
+++ b/drivers/net/ethernet/micrel/ks8851_mll.c
@@ -1315,7 +1315,10 @@ static int ks_get_link_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings *cmd)
{
struct ks_net *ks = netdev_priv(netdev);
- return mii_ethtool_get_link_ksettings(&ks->mii, cmd);
+
+ mii_ethtool_get_link_ksettings(&ks->mii, cmd);
+
+ return 0;
}
static int ks_set_link_ksettings(struct net_device *netdev,
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index ee1c78abab0b..e798fbe08600 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -5020,8 +5020,7 @@ static inline int rx_proc(struct net_device *dev, struct ksz_hw* hw,
*/
skb_reserve(skb, 2);
- memcpy(skb_put(skb, packet_len),
- dma_buf->skb->data, packet_len);
+ skb_put_data(skb, dma_buf->skb->data, packet_len);
} while (0);
skb->protocol = eth_type_trans(skb, dev);
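/*
 * Editor's note: skb_put_data(skb, data, len) is equivalent to the
 * open-coded memcpy(skb_put(skb, len), data, len) it replaces here and in
 * the micrel hunks above; skb_put() also returns void * nowadays, which is
 * why the (u16 *) and (u32 *) casts could be dropped.
 */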
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index 118723ea681a..fd2ec36c6fa1 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -2459,7 +2459,6 @@ static int fill_rx_buffers(struct s2io_nic *nic, struct ring_info *ring,
struct buffAdd *ba;
struct RxD_t *first_rxdp = NULL;
u64 Buffer0_ptr = 0, Buffer1_ptr = 0;
- int rxd_index = 0;
struct RxD1 *rxdp1;
struct RxD3 *rxdp3;
struct swStat *swstats = &ring->nic->mac_control.stats_info->sw_stat;
@@ -2474,10 +2473,6 @@ static int fill_rx_buffers(struct s2io_nic *nic, struct ring_info *ring,
rxdp = ring->rx_blocks[block_no].rxds[off].virt_addr;
- rxd_index = off + 1;
- if (block_no)
- rxd_index += (block_no * ring->rxd_count);
-
if ((block_no == block_no1) &&
(off == ring->rx_curr_get_info.offset) &&
(rxdp->Host_Control)) {
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index 6a4310af5d97..50ea69d88480 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -3218,6 +3218,7 @@ static int vxge_hwtstamp_set(struct vxgedev *vdev, void __user *data)
case HWTSTAMP_FILTER_PTP_V2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ case HWTSTAMP_FILTER_NTP_ALL:
if (vdev->devh->config.hwts_en != VXGE_HW_HWTS_ENABLE)
return -EFAULT;
diff --git a/drivers/net/ethernet/netronome/Kconfig b/drivers/net/ethernet/netronome/Kconfig
index 967d7ca8c28c..0e331e2f685a 100644
--- a/drivers/net/ethernet/netronome/Kconfig
+++ b/drivers/net/ethernet/netronome/Kconfig
@@ -19,11 +19,22 @@ config NFP
tristate "Netronome(R) NFP4000/NFP6000 NIC driver"
depends on PCI && PCI_MSI
depends on VXLAN || VXLAN=n
+ depends on MAY_USE_DEVLINK
---help---
This driver supports the Netronome(R) NFP4000/NFP6000 based
cards working as an advanced Ethernet NIC. It works with both
SR-IOV physical and virtual functions.
+config NFP_APP_FLOWER
+ bool "NFP4000/NFP6000 TC Flower offload support"
+ depends on NFP
+ depends on NET_SWITCHDEV
+ ---help---
+ Enable driver support for TC Flower offload on NFP4000 and NFP6000.
+ Say Y if you plan to use TC Flower offload, either directly,
+ through Open vSwitch, or in any other way. Note that TC Flower
+ offload requires firmware with specific support.
+
config NFP_DEBUG
bool "Debug support for Netronome(R) NFP4000/NFP6000 NIC drivers"
depends on NFP
diff --git a/drivers/net/ethernet/netronome/nfp/Makefile b/drivers/net/ethernet/netronome/nfp/Makefile
index 4b15f0f496aa..b8e1358868bd 100644
--- a/drivers/net/ethernet/netronome/nfp/Makefile
+++ b/drivers/net/ethernet/netronome/nfp/Makefile
@@ -14,17 +14,35 @@ nfp-objs := \
nfpcore/nfp_resource.o \
nfpcore/nfp_rtsym.o \
nfpcore/nfp_target.o \
+ nfp_app.o \
+ nfp_app_nic.o \
+ nfp_devlink.o \
+ nfp_hwmon.o \
nfp_main.o \
nfp_net_common.o \
nfp_net_ethtool.o \
- nfp_net_offload.o \
nfp_net_main.o \
- nfp_netvf_main.o
+ nfp_net_repr.o \
+ nfp_netvf_main.o \
+ nfp_port.o \
+ bpf/main.o \
+ bpf/offload.o \
+ nic/main.o
+
+ifeq ($(CONFIG_NFP_APP_FLOWER),y)
+nfp-objs += \
+ flower/action.o \
+ flower/cmsg.o \
+ flower/main.o \
+ flower/match.o \
+ flower/metadata.o \
+ flower/offload.o
+endif
ifeq ($(CONFIG_BPF_SYSCALL),y)
nfp-objs += \
- nfp_bpf_verifier.o \
- nfp_bpf_jit.o
+ bpf/verifier.o \
+ bpf/jit.o
endif
nfp-$(CONFIG_NFP_DEBUG) += nfp_net_debugfs.o
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_bpf_jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
index 97a8f00674d0..8e57fda6b8b5 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_bpf_jit.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
@@ -39,8 +39,8 @@
#include <linux/pkt_cls.h>
#include <linux/unistd.h>
-#include "nfp_asm.h"
-#include "nfp_bpf.h"
+#include "main.h"
+#include "../nfp_asm.h"
/* --- NFP prog --- */
/* Foreach "multiple" entries macros provide pos and next<n> pointers.
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c
new file mode 100644
index 000000000000..afbdf5fd4e4f
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c
@@ -0,0 +1,160 @@
+/*
+ * Copyright (C) 2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <net/pkt_cls.h>
+
+#include "../nfpcore/nfp_cpp.h"
+#include "../nfp_app.h"
+#include "../nfp_main.h"
+#include "../nfp_net.h"
+#include "../nfp_port.h"
+#include "main.h"
+
+static bool nfp_net_ebpf_capable(struct nfp_net *nn)
+{
+ if (nn->cap & NFP_NET_CFG_CTRL_BPF &&
+ nn_readb(nn, NFP_NET_CFG_BPF_ABI) == NFP_NET_BPF_ABI)
+ return true;
+ return false;
+}
+
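+/* Offload-command selection, summarised: if a program is already
+ * offloaded via XDP we REPLACE it (or DESTROY it when prog is NULL);
+ * if TC currently owns the offload, a new XDP program gets -EBUSY and
+ * removal is a no-op; if nothing is offloaded, a non-NULL program is
+ * ADDed and a NULL one is a no-op.
+ */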
+static int
+nfp_bpf_xdp_offload(struct nfp_app *app, struct nfp_net *nn,
+ struct bpf_prog *prog)
+{
+ struct tc_cls_bpf_offload cmd = {
+ .prog = prog,
+ };
+ int ret;
+
+ if (!nfp_net_ebpf_capable(nn))
+ return -EINVAL;
+
+ if (nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF) {
+ if (!nn->dp.bpf_offload_xdp)
+ return prog ? -EBUSY : 0;
+ cmd.command = prog ? TC_CLSBPF_REPLACE : TC_CLSBPF_DESTROY;
+ } else {
+ if (!prog)
+ return 0;
+ cmd.command = TC_CLSBPF_ADD;
+ }
+
+ ret = nfp_net_bpf_offload(nn, &cmd);
+ /* Stop offload if replace not possible */
+ if (ret && cmd.command == TC_CLSBPF_REPLACE)
+ nfp_bpf_xdp_offload(app, nn, NULL);
+ nn->dp.bpf_offload_xdp = prog && !ret;
+ return ret;
+}
+
+static const char *nfp_bpf_extra_cap(struct nfp_app *app, struct nfp_net *nn)
+{
+ return nfp_net_ebpf_capable(nn) ? "BPF" : "";
+}
+
+static int
+nfp_bpf_vnic_init(struct nfp_app *app, struct nfp_net *nn, unsigned int id)
+{
+ struct nfp_net_bpf_priv *priv;
+ int ret;
+
+ /* Limit to single port, otherwise it's just a NIC */
+ if (id > 0) {
+ nfp_warn(app->cpp,
+ "BPF NIC doesn't support more than one port right now\n");
+ nn->port = nfp_port_alloc(app, NFP_PORT_INVALID, nn->dp.netdev);
+ return PTR_ERR_OR_ZERO(nn->port);
+ }
+
+ priv = kmalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ nn->app_priv = priv;
+ spin_lock_init(&priv->rx_filter_lock);
+ setup_timer(&priv->rx_filter_stats_timer,
+ nfp_net_filter_stats_timer, (unsigned long)nn);
+
+ ret = nfp_app_nic_vnic_init(app, nn, id);
+ if (ret)
+ kfree(priv);
+
+ return ret;
+}
+
+static void nfp_bpf_vnic_clean(struct nfp_app *app, struct nfp_net *nn)
+{
+ if (nn->dp.bpf_offload_xdp)
+ nfp_bpf_xdp_offload(app, nn, NULL);
+ kfree(nn->app_priv);
+}
+
+static int nfp_bpf_setup_tc(struct nfp_app *app, struct net_device *netdev,
+ u32 handle, __be16 proto, struct tc_to_netdev *tc)
+{
+ struct nfp_net *nn = netdev_priv(netdev);
+
+ if (TC_H_MAJ(handle) != TC_H_MAJ(TC_H_INGRESS))
+ return -EOPNOTSUPP;
+ if (proto != htons(ETH_P_ALL))
+ return -EOPNOTSUPP;
+
+ if (tc->type == TC_SETUP_CLSBPF && nfp_net_ebpf_capable(nn)) {
+ if (!nn->dp.bpf_offload_xdp)
+ return nfp_net_bpf_offload(nn, tc->cls_bpf);
+ else
+ return -EBUSY;
+ }
+
+ return -EINVAL;
+}
+
+static bool nfp_bpf_tc_busy(struct nfp_app *app, struct nfp_net *nn)
+{
+ return nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF;
+}
+
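+/* Callback table binding the eBPF offload application into the core
+ * nfp_app layer: vnic_init/vnic_clean manage the per-vNIC private
+ * data, while setup_tc and xdp_offload are the entry points through
+ * which programs reach the hardware.
+ */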
+const struct nfp_app_type app_bpf = {
+ .id = NFP_APP_BPF_NIC,
+ .name = "ebpf",
+
+ .extra_cap = nfp_bpf_extra_cap,
+
+ .vnic_init = nfp_bpf_vnic_init,
+ .vnic_clean = nfp_bpf_vnic_clean,
+
+ .setup_tc = nfp_bpf_setup_tc,
+ .tc_busy = nfp_bpf_tc_busy,
+ .xdp_offload = nfp_bpf_xdp_offload,
+};
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_bpf.h b/drivers/net/ethernet/netronome/nfp/bpf/main.h
index 9513c80f7be5..4051e943f363 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_bpf.h
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h
@@ -39,6 +39,8 @@
#include <linux/list.h>
#include <linux/types.h>
+#include "../nfp_net.h"
+
/* For branch fixup logic, use the upper-most byte of the branch instruction
* as a scratch area. Remember to clear this before sending instructions to
* the HW!
*/
@@ -198,4 +200,25 @@ nfp_bpf_jit(struct bpf_prog *filter, void *prog, enum nfp_bpf_action_type act,
int nfp_prog_verify(struct nfp_prog *nfp_prog, struct bpf_prog *prog);
+struct nfp_net;
+struct tc_cls_bpf_offload;
+
+/**
+ * struct nfp_net_bpf_priv - per-vNIC BPF private data
+ * @rx_filter: Filter offload statistics - dropped packets/bytes
+ * @rx_filter_prev: Filter offload statistics - values from previous update
+ * @rx_filter_change: Jiffies when statistics last changed
+ * @rx_filter_stats_timer: Timer for polling filter offload statistics
+ * @rx_filter_lock: Lock protecting timer state changes (teardown)
+ */
+struct nfp_net_bpf_priv {
+ struct nfp_stat_pair rx_filter, rx_filter_prev;
+ unsigned long rx_filter_change;
+ struct timer_list rx_filter_stats_timer;
+ spinlock_t rx_filter_lock;
+};
+
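+/* nfp_net_filter_stats_timer() re-arms itself every NFP_NET_STAT_POLL_IVL
+ * for as long as NFP_NET_CFG_CTRL_BPF stays set; rx_filter_lock serialises
+ * the re-arming against teardown in the stop path.
+ */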
+int nfp_net_bpf_offload(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf);
+void nfp_net_filter_stats_timer(unsigned long data);
+
#endif
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
index cc823df12c8a..78d80a364edb 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_offload.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
@@ -47,60 +47,59 @@
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
-#include "nfp_bpf.h"
-#include "nfp_net_ctrl.h"
-#include "nfp_net.h"
+#include "main.h"
+#include "../nfp_net_ctrl.h"
+#include "../nfp_net.h"
void nfp_net_filter_stats_timer(unsigned long data)
{
struct nfp_net *nn = (void *)data;
+ struct nfp_net_bpf_priv *priv;
struct nfp_stat_pair latest;
- spin_lock_bh(&nn->rx_filter_lock);
+ priv = nn->app_priv;
+
+ spin_lock_bh(&priv->rx_filter_lock);
if (nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF)
- mod_timer(&nn->rx_filter_stats_timer,
+ mod_timer(&priv->rx_filter_stats_timer,
jiffies + NFP_NET_STAT_POLL_IVL);
- spin_unlock_bh(&nn->rx_filter_lock);
+ spin_unlock_bh(&priv->rx_filter_lock);
latest.pkts = nn_readq(nn, NFP_NET_CFG_STATS_APP1_FRAMES);
latest.bytes = nn_readq(nn, NFP_NET_CFG_STATS_APP1_BYTES);
- if (latest.pkts != nn->rx_filter.pkts)
- nn->rx_filter_change = jiffies;
+ if (latest.pkts != priv->rx_filter.pkts)
+ priv->rx_filter_change = jiffies;
- nn->rx_filter = latest;
+ priv->rx_filter = latest;
}
static void nfp_net_bpf_stats_reset(struct nfp_net *nn)
{
- nn->rx_filter.pkts = nn_readq(nn, NFP_NET_CFG_STATS_APP1_FRAMES);
- nn->rx_filter.bytes = nn_readq(nn, NFP_NET_CFG_STATS_APP1_BYTES);
- nn->rx_filter_prev = nn->rx_filter;
- nn->rx_filter_change = jiffies;
+ struct nfp_net_bpf_priv *priv = nn->app_priv;
+
+ priv->rx_filter.pkts = nn_readq(nn, NFP_NET_CFG_STATS_APP1_FRAMES);
+ priv->rx_filter.bytes = nn_readq(nn, NFP_NET_CFG_STATS_APP1_BYTES);
+ priv->rx_filter_prev = priv->rx_filter;
+ priv->rx_filter_change = jiffies;
}
static int
nfp_net_bpf_stats_update(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf)
{
- struct tc_action *a;
- LIST_HEAD(actions);
+ struct nfp_net_bpf_priv *priv = nn->app_priv;
u64 bytes, pkts;
- pkts = nn->rx_filter.pkts - nn->rx_filter_prev.pkts;
- bytes = nn->rx_filter.bytes - nn->rx_filter_prev.bytes;
+ pkts = priv->rx_filter.pkts - priv->rx_filter_prev.pkts;
+ bytes = priv->rx_filter.bytes - priv->rx_filter_prev.bytes;
bytes -= pkts * ETH_HLEN;
- nn->rx_filter_prev = nn->rx_filter;
+ priv->rx_filter_prev = priv->rx_filter;
- preempt_disable();
-
- tcf_exts_to_list(cls_bpf->exts, &actions);
- list_for_each_entry(a, &actions, list)
- tcf_action_stats_update(a, bytes, pkts, nn->rx_filter_change);
-
- preempt_enable();
+ tcf_exts_stats_update(cls_bpf->exts,
+ bytes, pkts, priv->rx_filter_change);
return 0;
}
@@ -190,6 +189,7 @@ nfp_net_bpf_load_and_start(struct nfp_net *nn, u32 tc_flags,
unsigned int code_sz, unsigned int n_instr,
bool dense_mode)
{
+ struct nfp_net_bpf_priv *priv = nn->app_priv;
u64 bpf_addr = dma_addr;
int err;
@@ -216,20 +216,23 @@ nfp_net_bpf_load_and_start(struct nfp_net *nn, u32 tc_flags,
dma_free_coherent(nn->dp.dev, code_sz, code, dma_addr);
nfp_net_bpf_stats_reset(nn);
- mod_timer(&nn->rx_filter_stats_timer, jiffies + NFP_NET_STAT_POLL_IVL);
+ mod_timer(&priv->rx_filter_stats_timer,
+ jiffies + NFP_NET_STAT_POLL_IVL);
}
static int nfp_net_bpf_stop(struct nfp_net *nn)
{
+ struct nfp_net_bpf_priv *priv = nn->app_priv;
+
if (!(nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF))
return 0;
- spin_lock_bh(&nn->rx_filter_lock);
+ spin_lock_bh(&priv->rx_filter_lock);
nn->dp.ctrl &= ~NFP_NET_CFG_CTRL_BPF;
- spin_unlock_bh(&nn->rx_filter_lock);
+ spin_unlock_bh(&priv->rx_filter_lock);
nn_writel(nn, NFP_NET_CFG_CTRL, nn->dp.ctrl);
- del_timer_sync(&nn->rx_filter_stats_timer);
+ del_timer_sync(&priv->rx_filter_stats_timer);
nn->dp.bpf_offload_skip_sw = 0;
return nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_bpf_verifier.c b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
index b3361f9b8e5c..d696ba46f70a 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_bpf_verifier.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
@@ -38,7 +38,7 @@
#include <linux/kernel.h>
#include <linux/pkt_cls.h>
-#include "nfp_bpf.h"
+#include "main.h"
/* Analyzer/verifier definitions */
struct nfp_bpf_analyzer_priv {
diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c
new file mode 100644
index 000000000000..db9750695dc7
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/flower/action.c
@@ -0,0 +1,211 @@
+/*
+ * Copyright (C) 2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/bitfield.h>
+#include <net/pkt_cls.h>
+#include <net/switchdev.h>
+#include <net/tc_act/tc_gact.h>
+#include <net/tc_act/tc_mirred.h>
+#include <net/tc_act/tc_vlan.h>
+
+#include "cmsg.h"
+#include "main.h"
+#include "../nfp_net_repr.h"
+
+static void nfp_fl_pop_vlan(struct nfp_fl_pop_vlan *pop_vlan)
+{
+ size_t act_size = sizeof(struct nfp_fl_pop_vlan);
+ u16 tmp_pop_vlan_op;
+
+ tmp_pop_vlan_op =
+ FIELD_PREP(NFP_FL_ACT_LEN_LW, act_size >> NFP_FL_LW_SIZ) |
+ FIELD_PREP(NFP_FL_ACT_JMP_ID, NFP_FL_ACTION_OPCODE_POP_VLAN);
+
+ pop_vlan->a_op = cpu_to_be16(tmp_pop_vlan_op);
+ pop_vlan->reserved = 0;
+}
+
+static void
+nfp_fl_push_vlan(struct nfp_fl_push_vlan *push_vlan,
+ const struct tc_action *action)
+{
+ size_t act_size = sizeof(struct nfp_fl_push_vlan);
+ struct tcf_vlan *vlan = to_vlan(action);
+ u16 tmp_push_vlan_tci;
+ u16 tmp_push_vlan_op;
+
+ tmp_push_vlan_op =
+ FIELD_PREP(NFP_FL_ACT_LEN_LW, act_size >> NFP_FL_LW_SIZ) |
+ FIELD_PREP(NFP_FL_ACT_JMP_ID, NFP_FL_ACTION_OPCODE_PUSH_VLAN);
+
+ push_vlan->a_op = cpu_to_be16(tmp_push_vlan_op);
+ /* Set action push vlan parameters. */
+ push_vlan->reserved = 0;
+ push_vlan->vlan_tpid = tcf_vlan_push_proto(action);
+
+ tmp_push_vlan_tci =
+ FIELD_PREP(NFP_FL_PUSH_VLAN_PRIO, vlan->tcfv_push_prio) |
+ FIELD_PREP(NFP_FL_PUSH_VLAN_VID, vlan->tcfv_push_vid) |
+ NFP_FL_PUSH_VLAN_CFI;
+ push_vlan->vlan_tci = cpu_to_be16(tmp_push_vlan_tci);
+}
+
+static int
+nfp_fl_output(struct nfp_fl_output *output, const struct tc_action *action,
+ struct nfp_fl_payload *nfp_flow, bool last,
+ struct net_device *in_dev)
+{
+ size_t act_size = sizeof(struct nfp_fl_output);
+ struct net_device *out_dev;
+ u16 tmp_output_op;
+ int ifindex;
+
+ /* Set action opcode to output action. */
+ tmp_output_op =
+ FIELD_PREP(NFP_FL_ACT_LEN_LW, act_size >> NFP_FL_LW_SIZ) |
+ FIELD_PREP(NFP_FL_ACT_JMP_ID, NFP_FL_ACTION_OPCODE_OUTPUT);
+
+ output->a_op = cpu_to_be16(tmp_output_op);
+
+ /* Set action output parameters. */
+ output->flags = cpu_to_be16(last ? NFP_FL_OUT_FLAGS_LAST : 0);
+
+ ifindex = tcf_mirred_ifindex(action);
+ out_dev = __dev_get_by_index(dev_net(in_dev), ifindex);
+ if (!out_dev)
+ return -EOPNOTSUPP;
+
+ /* Only offload if egress ports are on the same device as the ingress
+ * port.
+ */
+ if (!switchdev_port_same_parent_id(in_dev, out_dev))
+ return -EOPNOTSUPP;
+
+ output->port = cpu_to_be32(nfp_repr_get_port_id(out_dev));
+ if (!output->port)
+ return -EOPNOTSUPP;
+
+ nfp_flow->meta.shortcut = output->port;
+
+ return 0;
+}
+
+static int
+nfp_flower_loop_action(const struct tc_action *a,
+ struct nfp_fl_payload *nfp_fl, int *a_len,
+ struct net_device *netdev)
+{
+ struct nfp_fl_push_vlan *psh_v;
+ struct nfp_fl_pop_vlan *pop_v;
+ struct nfp_fl_output *output;
+ int err;
+
+ if (is_tcf_gact_shot(a)) {
+ nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_DROP);
+ } else if (is_tcf_mirred_egress_redirect(a)) {
+ if (*a_len + sizeof(struct nfp_fl_output) > NFP_FL_MAX_A_SIZ)
+ return -EOPNOTSUPP;
+
+ output = (struct nfp_fl_output *)&nfp_fl->action_data[*a_len];
+ err = nfp_fl_output(output, a, nfp_fl, true, netdev);
+ if (err)
+ return err;
+
+ *a_len += sizeof(struct nfp_fl_output);
+ } else if (is_tcf_mirred_egress_mirror(a)) {
+ if (*a_len + sizeof(struct nfp_fl_output) > NFP_FL_MAX_A_SIZ)
+ return -EOPNOTSUPP;
+
+ output = (struct nfp_fl_output *)&nfp_fl->action_data[*a_len];
+ err = nfp_fl_output(output, a, nfp_fl, false, netdev);
+ if (err)
+ return err;
+
+ *a_len += sizeof(struct nfp_fl_output);
+ } else if (is_tcf_vlan(a) && tcf_vlan_action(a) == TCA_VLAN_ACT_POP) {
+ if (*a_len + sizeof(struct nfp_fl_pop_vlan) > NFP_FL_MAX_A_SIZ)
+ return -EOPNOTSUPP;
+
+ pop_v = (struct nfp_fl_pop_vlan *)&nfp_fl->action_data[*a_len];
+ nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_POPV);
+
+ nfp_fl_pop_vlan(pop_v);
+ *a_len += sizeof(struct nfp_fl_pop_vlan);
+ } else if (is_tcf_vlan(a) && tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) {
+ if (*a_len + sizeof(struct nfp_fl_push_vlan) > NFP_FL_MAX_A_SIZ)
+ return -EOPNOTSUPP;
+
+ psh_v = (struct nfp_fl_push_vlan *)&nfp_fl->action_data[*a_len];
+ nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
+
+ nfp_fl_push_vlan(psh_v, a);
+ *a_len += sizeof(struct nfp_fl_push_vlan);
+ } else {
+ /* Currently we do not handle any other actions. */
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
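+/* Translate the TC action list into a packed sequence of NFP action
+ * records, tracking the bytes consumed in act_len. meta.shortcut holds
+ * a fast-path hint which is only meaningful for a single-action list,
+ * hence the reset to NFP_FL_SC_ACT_NULL when act_cnt > 1.
+ */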
+int nfp_flower_compile_action(struct tc_cls_flower_offload *flow,
+ struct net_device *netdev,
+ struct nfp_fl_payload *nfp_flow)
+{
+ int act_len, act_cnt, err;
+ const struct tc_action *a;
+ LIST_HEAD(actions);
+
+ memset(nfp_flow->action_data, 0, NFP_FL_MAX_A_SIZ);
+ nfp_flow->meta.act_len = 0;
+ act_len = 0;
+ act_cnt = 0;
+
+ tcf_exts_to_list(flow->exts, &actions);
+ list_for_each_entry(a, &actions, list) {
+ err = nfp_flower_loop_action(a, nfp_flow, &act_len, netdev);
+ if (err)
+ return err;
+ act_cnt++;
+ }
+
+ /* We optimise when the action list is small; that optimisation
+ * unfortunately cannot be applied once there is more than one action
+ * in the list.
+ */
+ if (act_cnt > 1)
+ nfp_flow->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
+
+ nfp_flow->meta.act_len = act_len;
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
new file mode 100644
index 000000000000..dd7fa9cf225f
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
@@ -0,0 +1,157 @@
+/*
+ * Copyright (C) 2015-2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <net/dst_metadata.h>
+
+#include "main.h"
+#include "../nfpcore/nfp_cpp.h"
+#include "../nfp_net_repr.h"
+#include "./cmsg.h"
+
+#define nfp_flower_cmsg_warn(app, fmt, args...) \
+ do { \
+ if (net_ratelimit()) \
+ nfp_warn((app)->cpp, fmt, ## args); \
+ } while (0)
+
+static struct nfp_flower_cmsg_hdr *
+nfp_flower_cmsg_get_hdr(struct sk_buff *skb)
+{
+ return (struct nfp_flower_cmsg_hdr *)skb->data;
+}
+
+struct sk_buff *
+nfp_flower_cmsg_alloc(struct nfp_app *app, unsigned int size,
+ enum nfp_flower_cmsg_type_port type)
+{
+ struct nfp_flower_cmsg_hdr *ch;
+ struct sk_buff *skb;
+
+ size += NFP_FLOWER_CMSG_HLEN;
+
+ skb = nfp_app_ctrl_msg_alloc(app, size, GFP_KERNEL);
+ if (!skb)
+ return NULL;
+
+ ch = nfp_flower_cmsg_get_hdr(skb);
+ ch->pad = 0;
+ ch->version = NFP_FLOWER_CMSG_VER1;
+ ch->type = type;
+ skb_put(skb, size);
+
+ return skb;
+}
+
+int nfp_flower_cmsg_portmod(struct nfp_repr *repr, bool carrier_ok)
+{
+ struct nfp_flower_cmsg_portmod *msg;
+ struct sk_buff *skb;
+
+ skb = nfp_flower_cmsg_alloc(repr->app, sizeof(*msg),
+ NFP_FLOWER_CMSG_TYPE_PORT_MOD);
+ if (!skb)
+ return -ENOMEM;
+
+ msg = nfp_flower_cmsg_get_data(skb);
+ msg->portnum = cpu_to_be32(repr->dst->u.port_info.port_id);
+ msg->reserved = 0;
+ msg->info = carrier_ok;
+ msg->mtu = cpu_to_be16(repr->netdev->mtu);
+
+ nfp_ctrl_tx(repr->app->ctrl, skb);
+
+ return 0;
+}
+
+static void
+nfp_flower_cmsg_portmod_rx(struct nfp_app *app, struct sk_buff *skb)
+{
+ struct nfp_flower_cmsg_portmod *msg;
+ struct net_device *netdev;
+ bool link;
+
+ msg = nfp_flower_cmsg_get_data(skb);
+ link = msg->info & NFP_FLOWER_CMSG_PORTMOD_INFO_LINK;
+
+ rcu_read_lock();
+ netdev = nfp_app_repr_get(app, be32_to_cpu(msg->portnum));
+ if (!netdev) {
+ nfp_flower_cmsg_warn(app, "ctrl msg for unknown port 0x%08x\n",
+ be32_to_cpu(msg->portnum));
+ rcu_read_unlock();
+ return;
+ }
+
+ if (link) {
+ netif_carrier_on(netdev);
+ rtnl_lock();
+ dev_set_mtu(netdev, be16_to_cpu(msg->mtu));
+ rtnl_unlock();
+ } else {
+ netif_carrier_off(netdev);
+ }
+ rcu_read_unlock();
+}
+
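+/* Demultiplex control messages from the firmware by header type; the
+ * skb is always consumed here, whether or not the type is handled.
+ */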
+void nfp_flower_cmsg_rx(struct nfp_app *app, struct sk_buff *skb)
+{
+ struct nfp_flower_cmsg_hdr *cmsg_hdr;
+ enum nfp_flower_cmsg_type_port type;
+
+ cmsg_hdr = nfp_flower_cmsg_get_hdr(skb);
+
+ if (unlikely(cmsg_hdr->version != NFP_FLOWER_CMSG_VER1)) {
+ nfp_flower_cmsg_warn(app, "Cannot handle repr control version %u\n",
+ cmsg_hdr->version);
+ goto out;
+ }
+
+ type = cmsg_hdr->type;
+ switch (type) {
+ case NFP_FLOWER_CMSG_TYPE_PORT_MOD:
+ nfp_flower_cmsg_portmod_rx(app, skb);
+ break;
+ case NFP_FLOWER_CMSG_TYPE_FLOW_STATS:
+ nfp_flower_rx_flow_stats(app, skb);
+ break;
+ default:
+ nfp_flower_cmsg_warn(app, "Cannot handle invalid repr control type %u\n",
+ type);
+ }
+
+out:
+ dev_kfree_skb_any(skb);
+}
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
new file mode 100644
index 000000000000..cf738de170ab
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
@@ -0,0 +1,317 @@
+/*
+ * Copyright (C) 2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef NFP_FLOWER_CMSG_H
+#define NFP_FLOWER_CMSG_H
+
+#include <linux/bitfield.h>
+#include <linux/skbuff.h>
+#include <linux/types.h>
+
+#include "../nfp_app.h"
+
+#define NFP_FLOWER_LAYER_META BIT(0)
+#define NFP_FLOWER_LAYER_PORT BIT(1)
+#define NFP_FLOWER_LAYER_MAC BIT(2)
+#define NFP_FLOWER_LAYER_TP BIT(3)
+#define NFP_FLOWER_LAYER_IPV4 BIT(4)
+#define NFP_FLOWER_LAYER_IPV6 BIT(5)
+#define NFP_FLOWER_LAYER_CT BIT(6)
+#define NFP_FLOWER_LAYER_VXLAN BIT(7)
+
+#define NFP_FLOWER_LAYER_ETHER BIT(3)
+#define NFP_FLOWER_LAYER_ARP BIT(4)
+
+#define NFP_FLOWER_MASK_VLAN_PRIO GENMASK(15, 13)
+#define NFP_FLOWER_MASK_VLAN_CFI BIT(12)
+#define NFP_FLOWER_MASK_VLAN_VID GENMASK(11, 0)
+
+#define NFP_FL_SC_ACT_DROP 0x80000000
+#define NFP_FL_SC_ACT_USER 0x7D000000
+#define NFP_FL_SC_ACT_POPV 0x6A000000
+#define NFP_FL_SC_ACT_NULL 0x00000000
+
+/* The maximum action list size (in bytes) supported by the NFP.
+ */
+#define NFP_FL_MAX_A_SIZ 1216
+#define NFP_FL_LW_SIZ 2
+
+/* Action opcodes */
+#define NFP_FL_ACTION_OPCODE_OUTPUT 0
+#define NFP_FL_ACTION_OPCODE_PUSH_VLAN 1
+#define NFP_FL_ACTION_OPCODE_POP_VLAN 2
+#define NFP_FL_ACTION_OPCODE_NUM 32
+
+#define NFP_FL_ACT_JMP_ID GENMASK(15, 8)
+#define NFP_FL_ACT_LEN_LW GENMASK(7, 0)
+
+#define NFP_FL_OUT_FLAGS_LAST BIT(15)
+#define NFP_FL_OUT_FLAGS_USE_TUN BIT(4)
+#define NFP_FL_OUT_FLAGS_TYPE_IDX GENMASK(2, 0)
+
+#define NFP_FL_PUSH_VLAN_PRIO GENMASK(15, 13)
+#define NFP_FL_PUSH_VLAN_CFI BIT(12)
+#define NFP_FL_PUSH_VLAN_VID GENMASK(11, 0)
+
+struct nfp_fl_output {
+ __be16 a_op;
+ __be16 flags;
+ __be32 port;
+};
+
+struct nfp_fl_push_vlan {
+ __be16 a_op;
+ __be16 reserved;
+ __be16 vlan_tpid;
+ __be16 vlan_tci;
+};
+
+struct nfp_fl_pop_vlan {
+ __be16 a_op;
+ __be16 reserved;
+};
+
+/* Metadata without L2 (1W/4B)
+ * ----------------------------------------------------------------
+ * 3 2 1
+ * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | key_layers | mask_id | reserved |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+struct nfp_flower_meta_one {
+ u8 nfp_flow_key_layer;
+ u8 mask_id;
+ u16 reserved;
+};
+
+/* Metadata with L2 (1W/4B)
+ * ----------------------------------------------------------------
+ * 3 2 1
+ * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | key_type | mask_id | PCP |p| vlan outermost VID |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * ^ ^
+ * NOTE: | TCI |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+struct nfp_flower_meta_two {
+ u8 nfp_flow_key_layer;
+ u8 mask_id;
+ __be16 tci;
+};
+
+/* Port details (1W/4B)
+ * ----------------------------------------------------------------
+ * 3 2 1
+ * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | port_ingress |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+struct nfp_flower_in_port {
+ __be32 in_port;
+};
+
+/* L2 details (4W/16B)
+ * 3 2 1
+ * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | mac_addr_dst, 31 - 0 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | mac_addr_dst, 47 - 32 | mac_addr_src, 15 - 0 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | mac_addr_src, 47 - 16 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | mpls outermost label | TC |B| reserved |q|
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+struct nfp_flower_mac_mpls {
+ u8 mac_dst[6];
+ u8 mac_src[6];
+ __be32 mpls_lse;
+};
+
+/* L4 ports (for UDP, TCP, SCTP) (1W/4B)
+ * 3 2 1
+ * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | port_src | port_dst |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+struct nfp_flower_tp_ports {
+ __be16 port_src;
+ __be16 port_dst;
+};
+
+/* L3 IPv4 details (3W/12B)
+ * 3 2 1
+ * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | DSCP |ECN| protocol | reserved |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ipv4_addr_src |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ipv4_addr_dst |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+struct nfp_flower_ipv4 {
+ u8 tos;
+ u8 proto;
+ u8 ttl;
+ u8 reserved;
+ __be32 ipv4_src;
+ __be32 ipv4_dst;
+};
+
+/* L3 IPv6 details (10W/40B)
+ * 3 2 1
+ * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | DSCP |ECN| protocol | reserved |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ipv6_exthdr | res | ipv6_flow_label |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ipv6_addr_src, 31 - 0 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ipv6_addr_src, 63 - 32 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ipv6_addr_src, 95 - 64 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ipv6_addr_src, 127 - 96 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ipv6_addr_dst, 31 - 0 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ipv6_addr_dst, 63 - 32 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ipv6_addr_dst, 95 - 64 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ipv6_addr_dst, 127 - 96 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+struct nfp_flower_ipv6 {
+ u8 tos;
+ u8 proto;
+ u8 ttl;
+ u8 reserved;
+ __be32 ipv6_flow_label_exthdr;
+ struct in6_addr ipv6_src;
+ struct in6_addr ipv6_dst;
+};
+
+/* The base header for a control message packet.
+ * It defines an 8-bit version and an 8-bit type, padded
+ * to a 32-bit word. The rest of the packet is type-specific.
+ */
+struct nfp_flower_cmsg_hdr {
+ __be16 pad;
+ u8 type;
+ u8 version;
+};
+
+#define NFP_FLOWER_CMSG_HLEN sizeof(struct nfp_flower_cmsg_hdr)
+#define NFP_FLOWER_CMSG_VER1 1
+
+/* Types defined for port related control messages */
+enum nfp_flower_cmsg_type_port {
+ NFP_FLOWER_CMSG_TYPE_FLOW_ADD = 0,
+ NFP_FLOWER_CMSG_TYPE_FLOW_DEL = 2,
+ NFP_FLOWER_CMSG_TYPE_PORT_MOD = 8,
+ NFP_FLOWER_CMSG_TYPE_FLOW_STATS = 15,
+ NFP_FLOWER_CMSG_TYPE_PORT_ECHO = 16,
+ NFP_FLOWER_CMSG_TYPE_MAX = 32,
+};
+
+/* NFP_FLOWER_CMSG_TYPE_PORT_MOD */
+struct nfp_flower_cmsg_portmod {
+ __be32 portnum;
+ u8 reserved;
+ u8 info;
+ __be16 mtu;
+};
+
+#define NFP_FLOWER_CMSG_PORTMOD_INFO_LINK BIT(0)
+
+enum nfp_flower_cmsg_port_type {
+ NFP_FLOWER_CMSG_PORT_TYPE_UNSPEC = 0x0,
+ NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT = 0x1,
+ NFP_FLOWER_CMSG_PORT_TYPE_PCIE_PORT = 0x2,
+};
+
+enum nfp_flower_cmsg_port_vnic_type {
+ NFP_FLOWER_CMSG_PORT_VNIC_TYPE_VF = 0x0,
+ NFP_FLOWER_CMSG_PORT_VNIC_TYPE_PF = 0x1,
+ NFP_FLOWER_CMSG_PORT_VNIC_TYPE_CTRL = 0x2,
+};
+
+#define NFP_FLOWER_CMSG_PORT_TYPE GENMASK(31, 28)
+#define NFP_FLOWER_CMSG_PORT_SYS_ID GENMASK(27, 24)
+#define NFP_FLOWER_CMSG_PORT_NFP_ID GENMASK(23, 22)
+#define NFP_FLOWER_CMSG_PORT_PCI GENMASK(15, 14)
+#define NFP_FLOWER_CMSG_PORT_VNIC_TYPE GENMASK(13, 12)
+#define NFP_FLOWER_CMSG_PORT_VNIC GENMASK(11, 6)
+#define NFP_FLOWER_CMSG_PORT_PCIE_Q GENMASK(5, 0)
+#define NFP_FLOWER_CMSG_PORT_PHYS_PORT_NUM GENMASK(7, 0)
+
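+/* Worked example: nfp_flower_cmsg_phys_port(3) encodes to 0x10000003,
+ * i.e. PHYS_PORT type (0x1) in bits 31:28 and the port number in
+ * bits 7:0.
+ */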
+static inline u32 nfp_flower_cmsg_phys_port(u8 phys_port)
+{
+ return FIELD_PREP(NFP_FLOWER_CMSG_PORT_PHYS_PORT_NUM, phys_port) |
+ FIELD_PREP(NFP_FLOWER_CMSG_PORT_TYPE,
+ NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT);
+}
+
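+/* Worked example: nfp_flower_cmsg_pcie_port(0, NFP_FLOWER_CMSG_PORT_VNIC_TYPE_VF,
+ * 2, 0) encodes to 0x20000080, i.e. PCIE_PORT type (0x2) in bits 31:28
+ * and vNIC number 2 in bits 11:6.
+ */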
+static inline u32
+nfp_flower_cmsg_pcie_port(u8 nfp_pcie, enum nfp_flower_cmsg_port_vnic_type type,
+ u8 vnic, u8 q)
+{
+ return FIELD_PREP(NFP_FLOWER_CMSG_PORT_PCI, nfp_pcie) |
+ FIELD_PREP(NFP_FLOWER_CMSG_PORT_VNIC_TYPE, type) |
+ FIELD_PREP(NFP_FLOWER_CMSG_PORT_VNIC, vnic) |
+ FIELD_PREP(NFP_FLOWER_CMSG_PORT_PCIE_Q, q) |
+ FIELD_PREP(NFP_FLOWER_CMSG_PORT_TYPE,
+ NFP_FLOWER_CMSG_PORT_TYPE_PCIE_PORT);
+}
+
+static inline void *nfp_flower_cmsg_get_data(struct sk_buff *skb)
+{
+ return (unsigned char *)skb->data + NFP_FLOWER_CMSG_HLEN;
+}
+
+int nfp_flower_cmsg_portmod(struct nfp_repr *repr, bool carrier_ok);
+void nfp_flower_cmsg_rx(struct nfp_app *app, struct sk_buff *skb);
+struct sk_buff *
+nfp_flower_cmsg_alloc(struct nfp_app *app, unsigned int size,
+ enum nfp_flower_cmsg_type_port type);
+
+#endif
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c
new file mode 100644
index 000000000000..fc10f27e0a0c
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.c
@@ -0,0 +1,390 @@
+/*
+ * Copyright (C) 2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/pci.h>
+#include <linux/skbuff.h>
+#include <linux/vmalloc.h>
+#include <net/devlink.h>
+#include <net/dst_metadata.h>
+
+#include "main.h"
+#include "../nfpcore/nfp_cpp.h"
+#include "../nfpcore/nfp_nffw.h"
+#include "../nfpcore/nfp_nsp.h"
+#include "../nfp_app.h"
+#include "../nfp_main.h"
+#include "../nfp_net.h"
+#include "../nfp_net_repr.h"
+#include "../nfp_port.h"
+#include "./cmsg.h"
+
+#define NFP_FLOWER_ALLOWED_VER 0x0001000000010000UL
+
+static const char *nfp_flower_extra_cap(struct nfp_app *app, struct nfp_net *nn)
+{
+ return "FLOWER";
+}
+
+static enum devlink_eswitch_mode eswitch_mode_get(struct nfp_app *app)
+{
+ return DEVLINK_ESWITCH_MODE_SWITCHDEV;
+}
+
+static enum nfp_repr_type
+nfp_flower_repr_get_type_and_port(struct nfp_app *app, u32 port_id, u8 *port)
+{
+ switch (FIELD_GET(NFP_FLOWER_CMSG_PORT_TYPE, port_id)) {
+ case NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT:
+ *port = FIELD_GET(NFP_FLOWER_CMSG_PORT_PHYS_PORT_NUM,
+ port_id);
+ return NFP_REPR_TYPE_PHYS_PORT;
+
+ case NFP_FLOWER_CMSG_PORT_TYPE_PCIE_PORT:
+ *port = FIELD_GET(NFP_FLOWER_CMSG_PORT_VNIC, port_id);
+ if (FIELD_GET(NFP_FLOWER_CMSG_PORT_VNIC_TYPE, port_id) ==
+ NFP_FLOWER_CMSG_PORT_VNIC_TYPE_PF)
+ return NFP_REPR_TYPE_PF;
+ else
+ return NFP_REPR_TYPE_VF;
+ }
+
+ return NFP_FLOWER_CMSG_PORT_TYPE_UNSPEC;
+}
+
+static struct net_device *
+nfp_flower_repr_get(struct nfp_app *app, u32 port_id)
+{
+ enum nfp_repr_type repr_type;
+ struct nfp_reprs *reprs;
+ u8 port = 0;
+
+ repr_type = nfp_flower_repr_get_type_and_port(app, port_id, &port);
+
+ reprs = rcu_dereference(app->reprs[repr_type]);
+ if (!reprs)
+ return NULL;
+
+ if (port >= reprs->num_reprs)
+ return NULL;
+
+ return reprs->reprs[port];
+}
+
+static int
+nfp_flower_repr_netdev_open(struct nfp_app *app, struct nfp_repr *repr)
+{
+ int err;
+
+ err = nfp_flower_cmsg_portmod(repr, true);
+ if (err)
+ return err;
+
+ netif_carrier_on(repr->netdev);
+ netif_tx_wake_all_queues(repr->netdev);
+
+ return 0;
+}
+
+static int
+nfp_flower_repr_netdev_stop(struct nfp_app *app, struct nfp_repr *repr)
+{
+ netif_carrier_off(repr->netdev);
+ netif_tx_disable(repr->netdev);
+
+ return nfp_flower_cmsg_portmod(repr, false);
+}
+
+static void nfp_flower_sriov_disable(struct nfp_app *app)
+{
+ nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_VF);
+}
+
+static int
+nfp_flower_spawn_vnic_reprs(struct nfp_app *app,
+ enum nfp_flower_cmsg_port_vnic_type vnic_type,
+ enum nfp_repr_type repr_type, unsigned int cnt)
+{
+ u8 nfp_pcie = nfp_cppcore_pcie_unit(app->pf->cpp);
+ struct nfp_flower_priv *priv = app->priv;
+ struct nfp_reprs *reprs, *old_reprs;
+ enum nfp_port_type port_type;
+ const u8 queue = 0;
+ int i, err;
+
+ port_type = repr_type == NFP_REPR_TYPE_PF ? NFP_PORT_PF_PORT :
+ NFP_PORT_VF_PORT;
+
+ reprs = nfp_reprs_alloc(cnt);
+ if (!reprs)
+ return -ENOMEM;
+
+ for (i = 0; i < cnt; i++) {
+ struct nfp_port *port;
+ u32 port_id;
+
+ reprs->reprs[i] = nfp_repr_alloc(app);
+ if (!reprs->reprs[i]) {
+ err = -ENOMEM;
+ goto err_reprs_clean;
+ }
+
+ port = nfp_port_alloc(app, port_type, reprs->reprs[i]);
+ if (IS_ERR(port)) {
+ err = PTR_ERR(port);
+ goto err_reprs_clean;
+ }
+ if (repr_type == NFP_REPR_TYPE_PF) {
+ port->pf_id = i;
+ } else {
+ port->pf_id = 0; /* For now we only support 1 PF */
+ port->vf_id = i;
+ }
+
+ eth_hw_addr_random(reprs->reprs[i]);
+
+ port_id = nfp_flower_cmsg_pcie_port(nfp_pcie, vnic_type,
+ i, queue);
+ err = nfp_repr_init(app, reprs->reprs[i],
+ port_id, port, priv->nn->dp.netdev);
+ if (err) {
+ nfp_port_free(port);
+ goto err_reprs_clean;
+ }
+
+ nfp_info(app->cpp, "%s%d Representor(%s) created\n",
+ repr_type == NFP_REPR_TYPE_PF ? "PF" : "VF", i,
+ reprs->reprs[i]->name);
+ }
+
+ old_reprs = nfp_app_reprs_set(app, repr_type, reprs);
+ if (IS_ERR(old_reprs)) {
+ err = PTR_ERR(old_reprs);
+ goto err_reprs_clean;
+ }
+
+ return 0;
+err_reprs_clean:
+ nfp_reprs_clean_and_free(reprs);
+ return err;
+}
+
+static int nfp_flower_sriov_enable(struct nfp_app *app, int num_vfs)
+{
+ return nfp_flower_spawn_vnic_reprs(app,
+ NFP_FLOWER_CMSG_PORT_VNIC_TYPE_VF,
+ NFP_REPR_TYPE_VF, num_vfs);
+}
+
+static void nfp_flower_stop(struct nfp_app *app)
+{
+ nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PF);
+ nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PHYS_PORT);
+}
+
+static int
+nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv)
+{
+ struct nfp_eth_table *eth_tbl = app->pf->eth_tbl;
+ struct nfp_reprs *reprs, *old_reprs;
+ unsigned int i;
+ int err;
+
+ reprs = nfp_reprs_alloc(eth_tbl->max_index + 1);
+ if (!reprs)
+ return -ENOMEM;
+
+ for (i = 0; i < eth_tbl->count; i++) {
+ int phys_port = eth_tbl->ports[i].index;
+ struct nfp_port *port;
+ u32 cmsg_port_id;
+
+ reprs->reprs[phys_port] = nfp_repr_alloc(app);
+ if (!reprs->reprs[phys_port]) {
+ err = -ENOMEM;
+ goto err_reprs_clean;
+ }
+
+ port = nfp_port_alloc(app, NFP_PORT_PHYS_PORT,
+ reprs->reprs[phys_port]);
+ if (IS_ERR(port)) {
+ err = PTR_ERR(port);
+ goto err_reprs_clean;
+ }
+ err = nfp_port_init_phy_port(app->pf, app, port, i);
+ if (err) {
+ nfp_port_free(port);
+ goto err_reprs_clean;
+ }
+
+ SET_NETDEV_DEV(reprs->reprs[phys_port], &priv->nn->pdev->dev);
+ nfp_net_get_mac_addr(app->pf, port);
+
+ cmsg_port_id = nfp_flower_cmsg_phys_port(phys_port);
+ err = nfp_repr_init(app, reprs->reprs[phys_port],
+ cmsg_port_id, port, priv->nn->dp.netdev);
+ if (err) {
+ nfp_port_free(port);
+ goto err_reprs_clean;
+ }
+
+ nfp_info(app->cpp, "Phys Port %d Representor(%s) created\n",
+ phys_port, reprs->reprs[phys_port]->name);
+ }
+
+ old_reprs = nfp_app_reprs_set(app, NFP_REPR_TYPE_PHYS_PORT, reprs);
+ if (IS_ERR(old_reprs)) {
+ err = PTR_ERR(old_reprs);
+ goto err_reprs_clean;
+ }
+
+ return 0;
+err_reprs_clean:
+ nfp_reprs_clean_and_free(reprs);
+ return err;
+}
+
+static int nfp_flower_start(struct nfp_app *app)
+{
+ int err;
+
+ err = nfp_flower_spawn_phy_reprs(app, app->priv);
+ if (err)
+ return err;
+
+ return nfp_flower_spawn_vnic_reprs(app,
+ NFP_FLOWER_CMSG_PORT_VNIC_TYPE_PF,
+ NFP_REPR_TYPE_PF, 1);
+}
+
+static int nfp_flower_vnic_init(struct nfp_app *app, struct nfp_net *nn,
+ unsigned int id)
+{
+ struct nfp_flower_priv *priv = app->priv;
+
+ if (id > 0) {
+ nfp_warn(app->cpp, "FlowerNIC doesn't support more than one data vNIC\n");
+ goto err_invalid_port;
+ }
+
+ priv->nn = nn;
+
+ eth_hw_addr_random(nn->dp.netdev);
+ netif_keep_dst(nn->dp.netdev);
+
+ return 0;
+
+err_invalid_port:
+ nn->port = nfp_port_alloc(app, NFP_PORT_INVALID, nn->dp.netdev);
+ return PTR_ERR_OR_ZERO(nn->port);
+}
+
+static int nfp_flower_init(struct nfp_app *app)
+{
+ const struct nfp_pf *pf = app->pf;
+ u64 version;
+ int err;
+
+ if (!pf->eth_tbl) {
+ nfp_warn(app->cpp, "FlowerNIC requires eth table\n");
+ return -EINVAL;
+ }
+
+ if (!pf->mac_stats_bar) {
+ nfp_warn(app->cpp, "FlowerNIC requires mac_stats BAR\n");
+ return -EINVAL;
+ }
+
+ if (!pf->vf_cfg_bar) {
+ nfp_warn(app->cpp, "FlowerNIC requires vf_cfg BAR\n");
+ return -EINVAL;
+ }
+
+ version = nfp_rtsym_read_le(app->pf->rtbl, "hw_flower_version", &err);
+ if (err) {
+ nfp_warn(app->cpp, "FlowerNIC requires hw_flower_version memory symbol\n");
+ return err;
+ }
+
+ /* We need to ensure hardware has enough flower capabilities. */
+ if (version != NFP_FLOWER_ALLOWED_VER) {
+ nfp_warn(app->cpp, "FlowerNIC: unsupported firmware version\n");
+ return -EINVAL;
+ }
+
+ app->priv = vzalloc(sizeof(struct nfp_flower_priv));
+ if (!app->priv)
+ return -ENOMEM;
+
+ err = nfp_flower_metadata_init(app);
+ if (err)
+ goto err_free_app_priv;
+
+ return 0;
+
+err_free_app_priv:
+ vfree(app->priv);
+ return err;
+}
+
+static void nfp_flower_clean(struct nfp_app *app)
+{
+ vfree(app->priv);
+ app->priv = NULL;
+}
+
+const struct nfp_app_type app_flower = {
+ .id = NFP_APP_FLOWER_NIC,
+ .name = "flower",
+ .ctrl_has_meta = true,
+
+ .extra_cap = nfp_flower_extra_cap,
+
+ .init = nfp_flower_init,
+ .clean = nfp_flower_clean,
+
+ .vnic_init = nfp_flower_vnic_init,
+
+ .repr_open = nfp_flower_repr_netdev_open,
+ .repr_stop = nfp_flower_repr_netdev_stop,
+
+ .start = nfp_flower_start,
+ .stop = nfp_flower_stop,
+
+ .ctrl_msg_rx = nfp_flower_cmsg_rx,
+
+ .sriov_enable = nfp_flower_sriov_enable,
+ .sriov_disable = nfp_flower_sriov_disable,
+
+ .eswitch_mode_get = eswitch_mode_get,
+ .repr_get = nfp_flower_repr_get,
+
+ .setup_tc = nfp_flower_setup_tc,
+};
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h
new file mode 100644
index 000000000000..9e64c048e83f
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.h
@@ -0,0 +1,159 @@
+/*
+ * Copyright (C) 2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __NFP_FLOWER_H__
+#define __NFP_FLOWER_H__ 1
+
+#include <linux/circ_buf.h>
+#include <linux/hashtable.h>
+#include <linux/time64.h>
+#include <linux/types.h>
+
+struct tc_to_netdev;
+struct net_device;
+struct nfp_app;
+
+#define NFP_FL_STATS_ENTRY_RS BIT(20)
+#define NFP_FL_STATS_ELEM_RS 4
+#define NFP_FL_REPEATED_HASH_MAX BIT(17)
+#define NFP_FLOWER_HASH_BITS 19
+#define NFP_FLOWER_MASK_ENTRY_RS 256
+#define NFP_FLOWER_MASK_ELEMENT_RS 1
+#define NFP_FLOWER_MASK_HASH_BITS 10
+
+#define NFP_FL_META_FLAG_NEW_MASK 128
+#define NFP_FL_META_FLAG_LAST_MASK 1
+
+#define NFP_FL_MASK_REUSE_TIME_NS 40000
+#define NFP_FL_MASK_ID_LOCATION 1
+
+struct nfp_fl_mask_id {
+ struct circ_buf mask_id_free_list;
+ struct timespec64 *last_used;
+ u8 init_unallocated;
+};
+
+struct nfp_fl_stats_id {
+ struct circ_buf free_list;
+ u32 init_unalloc;
+ u8 repeated_em_count;
+};
+
+/**
+ * struct nfp_flower_priv - Flower APP per-vNIC priv data
+ * @nn: Pointer to vNIC
+ * @mask_id_seed: Seed used for mask hash table
+ * @flower_version: HW version of flower
+ * @stats_ids: List of free stats ids
+ * @mask_ids: List of free mask ids
+ * @mask_table: Hash table used to store masks
+ * @flow_table: Hash table used to store flower rules
+ */
+struct nfp_flower_priv {
+ struct nfp_net *nn;
+ u32 mask_id_seed;
+ u64 flower_version;
+ struct nfp_fl_stats_id stats_ids;
+ struct nfp_fl_mask_id mask_ids;
+ DECLARE_HASHTABLE(mask_table, NFP_FLOWER_MASK_HASH_BITS);
+ DECLARE_HASHTABLE(flow_table, NFP_FLOWER_HASH_BITS);
+};
+
+struct nfp_fl_key_ls {
+ u32 key_layer_two;
+ u8 key_layer;
+ int key_size;
+};
+
+struct nfp_fl_rule_metadata {
+ u8 key_len;
+ u8 mask_len;
+ u8 act_len;
+ u8 flags;
+ __be32 host_ctx_id;
+ __be64 host_cookie __packed;
+ __be64 flow_version __packed;
+ __be32 shortcut;
+};
+
+struct nfp_fl_stats {
+ u64 pkts;
+ u64 bytes;
+ u64 used;
+};
+
+struct nfp_fl_payload {
+ struct nfp_fl_rule_metadata meta;
+ unsigned long tc_flower_cookie;
+ struct hlist_node link;
+ struct rcu_head rcu;
+ spinlock_t lock; /* lock stats */
+ struct nfp_fl_stats stats;
+ char *unmasked_data;
+ char *mask_data;
+ char *action_data;
+};
+
+struct nfp_fl_stats_frame {
+ __be32 stats_con_id;
+ __be32 pkt_count;
+ __be64 byte_count;
+ __be64 stats_cookie;
+};
+
+int nfp_flower_metadata_init(struct nfp_app *app);
+void nfp_flower_metadata_cleanup(struct nfp_app *app);
+
+int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev,
+ u32 handle, __be16 proto, struct tc_to_netdev *tc);
+int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,
+ struct nfp_fl_key_ls *key_ls,
+ struct net_device *netdev,
+ struct nfp_fl_payload *nfp_flow);
+int nfp_flower_compile_action(struct tc_cls_flower_offload *flow,
+ struct net_device *netdev,
+ struct nfp_fl_payload *nfp_flow);
+int nfp_compile_flow_metadata(struct nfp_app *app,
+ struct tc_cls_flower_offload *flow,
+ struct nfp_fl_payload *nfp_flow);
+int nfp_modify_flow_metadata(struct nfp_app *app,
+ struct nfp_fl_payload *nfp_flow);
+
+struct nfp_fl_payload *
+nfp_flower_search_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie);
+struct nfp_fl_payload *
+nfp_flower_remove_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie);
+
+void nfp_flower_rx_flow_stats(struct nfp_app *app, struct sk_buff *skb);
+
+#endif
diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c
new file mode 100644
index 000000000000..0e08404480ef
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/flower/match.c
@@ -0,0 +1,292 @@
+/*
+ * Copyright (C) 2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/bitfield.h>
+#include <net/pkt_cls.h>
+
+#include "cmsg.h"
+#include "main.h"
+
+static void
+nfp_flower_compile_meta_tci(struct nfp_flower_meta_two *frame,
+ struct tc_cls_flower_offload *flow, u8 key_type,
+ bool mask_version)
+{
+ struct flow_dissector_key_vlan *flow_vlan;
+ u16 tmp_tci;
+
+ /* Populate the metadata frame. */
+ frame->nfp_flow_key_layer = key_type;
+ frame->mask_id = ~0;
+
+ if (mask_version) {
+ frame->tci = cpu_to_be16(~0);
+ return;
+ }
+
+ flow_vlan = skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_VLAN,
+ flow->key);
+
+ /* Populate the tci field. */
+ if (!flow_vlan->vlan_id) {
+ tmp_tci = 0;
+ } else {
+ tmp_tci = FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
+ flow_vlan->vlan_priority) |
+ FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
+ flow_vlan->vlan_id) |
+ NFP_FLOWER_MASK_VLAN_CFI;
+ }
+ frame->tci = cpu_to_be16(tmp_tci);
+}
+
+static void
+nfp_flower_compile_meta(struct nfp_flower_meta_one *frame, u8 key_type)
+{
+ frame->nfp_flow_key_layer = key_type;
+ frame->mask_id = 0;
+ frame->reserved = 0;
+}
+
+static int
+nfp_flower_compile_port(struct nfp_flower_in_port *frame, u32 cmsg_port,
+ bool mask_version)
+{
+ if (mask_version) {
+ frame->in_port = cpu_to_be32(~0);
+ return 0;
+ }
+
+ frame->in_port = cpu_to_be32(cmsg_port);
+
+ return 0;
+}
+
+static void
+nfp_flower_compile_mac(struct nfp_flower_mac_mpls *frame,
+ struct tc_cls_flower_offload *flow,
+ bool mask_version)
+{
+ struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
+ struct flow_dissector_key_eth_addrs *flow_mac;
+
+ flow_mac = skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_ETH_ADDRS,
+ target);
+
+ memset(frame, 0, sizeof(struct nfp_flower_mac_mpls));
+
+ /* Populate mac frame. */
+ ether_addr_copy(frame->mac_dst, &flow_mac->dst[0]);
+ ether_addr_copy(frame->mac_src, &flow_mac->src[0]);
+
+ if (mask_version)
+ frame->mpls_lse = cpu_to_be32(~0);
+}
+
+static void
+nfp_flower_compile_tport(struct nfp_flower_tp_ports *frame,
+ struct tc_cls_flower_offload *flow,
+ bool mask_version)
+{
+ struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
+ struct flow_dissector_key_ports *flow_tp;
+
+ flow_tp = skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_PORTS,
+ target);
+
+ frame->port_src = flow_tp->src;
+ frame->port_dst = flow_tp->dst;
+}
+
+static void
+nfp_flower_compile_ipv4(struct nfp_flower_ipv4 *frame,
+ struct tc_cls_flower_offload *flow,
+ bool mask_version)
+{
+ struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
+ struct flow_dissector_key_ipv4_addrs *flow_ipv4;
+ struct flow_dissector_key_basic *flow_basic;
+
+ flow_ipv4 = skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_IPV4_ADDRS,
+ target);
+
+ flow_basic = skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_BASIC,
+ target);
+
+ /* Populate IPv4 frame. */
+ frame->reserved = 0;
+ frame->ipv4_src = flow_ipv4->src;
+ frame->ipv4_dst = flow_ipv4->dst;
+ frame->proto = flow_basic->ip_proto;
+ /* Wildcard TOS/TTL for now. */
+ frame->tos = 0;
+ frame->ttl = 0;
+}
+
+static void
+nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *frame,
+ struct tc_cls_flower_offload *flow,
+ bool mask_version)
+{
+ struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
+ struct flow_dissector_key_ipv6_addrs *flow_ipv6;
+ struct flow_dissector_key_basic *flow_basic;
+
+ flow_ipv6 = skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_IPV6_ADDRS,
+ target);
+
+ flow_basic = skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_BASIC,
+ target);
+
+ /* Populate IPv6 frame. */
+ frame->reserved = 0;
+ frame->ipv6_src = flow_ipv6->src;
+ frame->ipv6_dst = flow_ipv6->dst;
+ frame->proto = flow_basic->ip_proto;
+ /* Wildcard LABEL/TOS/TTL for now. */
+ frame->ipv6_flow_label_exthdr = 0;
+ frame->tos = 0;
+ frame->ttl = 0;
+}
+
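+/* The match key is serialised as a sequence of fixed-size sections in
+ * key_layer order: metadata (with TCI and in_port when the PORT layer
+ * is present), then MAC/MPLS, transport ports and IPv4 or IPv6. The
+ * ext and msk cursors walk the exact-match and mask buffers in
+ * lockstep.
+ */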
+int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,
+ struct nfp_fl_key_ls *key_ls,
+ struct net_device *netdev,
+ struct nfp_fl_payload *nfp_flow)
+{
+ int err;
+ u8 *ext;
+ u8 *msk;
+
+ memset(nfp_flow->unmasked_data, 0, key_ls->key_size);
+ memset(nfp_flow->mask_data, 0, key_ls->key_size);
+
+ ext = nfp_flow->unmasked_data;
+ msk = nfp_flow->mask_data;
+ if (NFP_FLOWER_LAYER_PORT & key_ls->key_layer) {
+ /* Populate Exact Metadata. */
+ nfp_flower_compile_meta_tci((struct nfp_flower_meta_two *)ext,
+ flow, key_ls->key_layer, false);
+ /* Populate Mask Metadata. */
+ nfp_flower_compile_meta_tci((struct nfp_flower_meta_two *)msk,
+ flow, key_ls->key_layer, true);
+ ext += sizeof(struct nfp_flower_meta_two);
+ msk += sizeof(struct nfp_flower_meta_two);
+
+ /* Populate Exact Port data. */
+ err = nfp_flower_compile_port((struct nfp_flower_in_port *)ext,
+ nfp_repr_get_port_id(netdev),
+ false);
+ if (err)
+ return err;
+
+ /* Populate Mask Port Data. */
+ err = nfp_flower_compile_port((struct nfp_flower_in_port *)msk,
+ nfp_repr_get_port_id(netdev),
+ true);
+ if (err)
+ return err;
+
+ ext += sizeof(struct nfp_flower_in_port);
+ msk += sizeof(struct nfp_flower_in_port);
+ } else {
+ /* Populate Exact Metadata. */
+ nfp_flower_compile_meta((struct nfp_flower_meta_one *)ext,
+ key_ls->key_layer);
+ /* Populate Mask Metadata. */
+ nfp_flower_compile_meta((struct nfp_flower_meta_one *)msk,
+ key_ls->key_layer);
+ ext += sizeof(struct nfp_flower_meta_one);
+ msk += sizeof(struct nfp_flower_meta_one);
+ }
+
+ if (NFP_FLOWER_LAYER_META & key_ls->key_layer) {
+ /* Additional Metadata Fields.
+ * Currently unsupported.
+ */
+ return -EOPNOTSUPP;
+ }
+
+ if (NFP_FLOWER_LAYER_MAC & key_ls->key_layer) {
+ /* Populate Exact MAC Data. */
+ nfp_flower_compile_mac((struct nfp_flower_mac_mpls *)ext,
+ flow, false);
+ /* Populate Mask MAC Data. */
+ nfp_flower_compile_mac((struct nfp_flower_mac_mpls *)msk,
+ flow, true);
+ ext += sizeof(struct nfp_flower_mac_mpls);
+ msk += sizeof(struct nfp_flower_mac_mpls);
+ }
+
+ if (NFP_FLOWER_LAYER_TP & key_ls->key_layer) {
+ /* Populate Exact TP Data. */
+ nfp_flower_compile_tport((struct nfp_flower_tp_ports *)ext,
+ flow, false);
+ /* Populate Mask TP Data. */
+ nfp_flower_compile_tport((struct nfp_flower_tp_ports *)msk,
+ flow, true);
+ ext += sizeof(struct nfp_flower_tp_ports);
+ msk += sizeof(struct nfp_flower_tp_ports);
+ }
+
+ if (NFP_FLOWER_LAYER_IPV4 & key_ls->key_layer) {
+ /* Populate Exact IPv4 Data. */
+ nfp_flower_compile_ipv4((struct nfp_flower_ipv4 *)ext,
+ flow, false);
+ /* Populate Mask IPv4 Data. */
+ nfp_flower_compile_ipv4((struct nfp_flower_ipv4 *)msk,
+ flow, true);
+ ext += sizeof(struct nfp_flower_ipv4);
+ msk += sizeof(struct nfp_flower_ipv4);
+ }
+
+ if (NFP_FLOWER_LAYER_IPV6 & key_ls->key_layer) {
+ /* Populate Exact IPv6 Data. */
+ nfp_flower_compile_ipv6((struct nfp_flower_ipv6 *)ext,
+ flow, false);
+ /* Populate Mask IPv6 Data. */
+ nfp_flower_compile_ipv6((struct nfp_flower_ipv6 *)msk,
+ flow, true);
+ ext += sizeof(struct nfp_flower_ipv6);
+ msk += sizeof(struct nfp_flower_ipv6);
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/netronome/nfp/flower/metadata.c b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
new file mode 100644
index 000000000000..fec0ff2ca94f
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
@@ -0,0 +1,438 @@
+/*
+ * Copyright (C) 2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/hash.h>
+#include <linux/hashtable.h>
+#include <linux/jhash.h>
+#include <linux/vmalloc.h>
+#include <net/pkt_cls.h>
+
+#include "cmsg.h"
+#include "main.h"
+#include "../nfp_app.h"
+
+struct nfp_mask_id_table {
+ struct hlist_node link;
+ u32 hash_key;
+ u32 ref_cnt;
+ u8 mask_id;
+};
+
+static int nfp_release_stats_entry(struct nfp_app *app, u32 stats_context_id)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ struct circ_buf *ring;
+
+ ring = &priv->stats_ids.free_list;
+ /* Check if buffer is full. */
+ if (!CIRC_SPACE(ring->head, ring->tail, NFP_FL_STATS_ENTRY_RS *
+ NFP_FL_STATS_ELEM_RS -
+ NFP_FL_STATS_ELEM_RS + 1))
+ return -ENOBUFS;
+
+ memcpy(&ring->buf[ring->head], &stats_context_id, NFP_FL_STATS_ELEM_RS);
+ ring->head = (ring->head + NFP_FL_STATS_ELEM_RS) %
+ (NFP_FL_STATS_ENTRY_RS * NFP_FL_STATS_ELEM_RS);
+
+ return 0;
+}
+
+static int nfp_get_stats_entry(struct nfp_app *app, u32 *stats_context_id)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ u32 freed_stats_id, temp_stats_id;
+ struct circ_buf *ring;
+
+ ring = &priv->stats_ids.free_list;
+ freed_stats_id = NFP_FL_STATS_ENTRY_RS;
+ /* Check for unallocated entries first. */
+ if (priv->stats_ids.init_unalloc > 0) {
+ *stats_context_id = priv->stats_ids.init_unalloc - 1;
+ priv->stats_ids.init_unalloc--;
+ return 0;
+ }
+
+ /* Check if buffer is empty. */
+ if (ring->head == ring->tail) {
+ *stats_context_id = freed_stats_id;
+ return -ENOENT;
+ }
+
+ memcpy(&temp_stats_id, &ring->buf[ring->tail], NFP_FL_STATS_ELEM_RS);
+ *stats_context_id = temp_stats_id;
+ memcpy(&ring->buf[ring->tail], &freed_stats_id, NFP_FL_STATS_ELEM_RS);
+ ring->tail = (ring->tail + NFP_FL_STATS_ELEM_RS) %
+ (NFP_FL_STATS_ENTRY_RS * NFP_FL_STATS_ELEM_RS);
+
+ return 0;
+}
+
+/* Must be called with either RTNL or rcu_read_lock */
+struct nfp_fl_payload *
+nfp_flower_search_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ struct nfp_fl_payload *flower_entry;
+
+ hash_for_each_possible_rcu(priv->flow_table, flower_entry, link,
+ tc_flower_cookie)
+ if (flower_entry->tc_flower_cookie == tc_flower_cookie)
+ return flower_entry;
+
+ return NULL;
+}
+
+static void
+nfp_flower_update_stats(struct nfp_app *app, struct nfp_fl_stats_frame *stats)
+{
+ struct nfp_fl_payload *nfp_flow;
+ unsigned long flower_cookie;
+
+ flower_cookie = be64_to_cpu(stats->stats_cookie);
+
+ rcu_read_lock();
+ nfp_flow = nfp_flower_search_fl_table(app, flower_cookie);
+ if (!nfp_flow)
+ goto exit_rcu_unlock;
+
+ if (nfp_flow->meta.host_ctx_id != stats->stats_con_id)
+ goto exit_rcu_unlock;
+
+ spin_lock(&nfp_flow->lock);
+ nfp_flow->stats.pkts += be32_to_cpu(stats->pkt_count);
+ nfp_flow->stats.bytes += be64_to_cpu(stats->byte_count);
+ nfp_flow->stats.used = jiffies;
+ spin_unlock(&nfp_flow->lock);
+
+exit_rcu_unlock:
+ rcu_read_unlock();
+}
+
+void nfp_flower_rx_flow_stats(struct nfp_app *app, struct sk_buff *skb)
+{
+ unsigned int msg_len = skb->len - NFP_FLOWER_CMSG_HLEN;
+ struct nfp_fl_stats_frame *stats_frame;
+ unsigned char *msg;
+ int i;
+
+ msg = nfp_flower_cmsg_get_data(skb);
+
+ stats_frame = (struct nfp_fl_stats_frame *)msg;
+ for (i = 0; i < msg_len / sizeof(*stats_frame); i++)
+ nfp_flower_update_stats(app, stats_frame + i);
+}
+
+static int nfp_release_mask_id(struct nfp_app *app, u8 mask_id)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ struct circ_buf *ring;
+ struct timespec64 now;
+
+ ring = &priv->mask_ids.mask_id_free_list;
+ /* Check if buffer is full. */
+ if (CIRC_SPACE(ring->head, ring->tail, NFP_FLOWER_MASK_ENTRY_RS) == 0)
+ return -ENOBUFS;
+
+ memcpy(&ring->buf[ring->head], &mask_id, NFP_FLOWER_MASK_ELEMENT_RS);
+ ring->head = (ring->head + NFP_FLOWER_MASK_ELEMENT_RS) %
+ (NFP_FLOWER_MASK_ENTRY_RS * NFP_FLOWER_MASK_ELEMENT_RS);
+
+ getnstimeofday64(&now);
+ priv->mask_ids.last_used[mask_id] = now;
+
+ return 0;
+}
+
+static int nfp_mask_alloc(struct nfp_app *app, u8 *mask_id)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ struct timespec64 delta, now;
+ struct circ_buf *ring;
+ u8 temp_id, freed_id;
+
+ ring = &priv->mask_ids.mask_id_free_list;
+ freed_id = NFP_FLOWER_MASK_ENTRY_RS - 1;
+ /* Check for unallocated entries first. */
+ if (priv->mask_ids.init_unallocated > 0) {
+ *mask_id = priv->mask_ids.init_unallocated;
+ priv->mask_ids.init_unallocated--;
+ return 0;
+ }
+
+ /* Check if buffer is empty. */
+ if (ring->head == ring->tail)
+ goto err_not_found;
+
+ memcpy(&temp_id, &ring->buf[ring->tail], NFP_FLOWER_MASK_ELEMENT_RS);
+ *mask_id = temp_id;
+
+ getnstimeofday64(&now);
+ delta = timespec64_sub(now, priv->mask_ids.last_used[*mask_id]);
+
+ if (timespec64_to_ns(&delta) < NFP_FL_MASK_REUSE_TIME_NS)
+ goto err_not_found;
+
+ memcpy(&ring->buf[ring->tail], &freed_id, NFP_FLOWER_MASK_ELEMENT_RS);
+ ring->tail = (ring->tail + NFP_FLOWER_MASK_ELEMENT_RS) %
+ (NFP_FLOWER_MASK_ENTRY_RS * NFP_FLOWER_MASK_ELEMENT_RS);
+
+ return 0;
+
+err_not_found:
+ *mask_id = freed_id;
+ return -ENOENT;
+}
+
+static int
+nfp_add_mask_table(struct nfp_app *app, char *mask_data, u32 mask_len)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ struct nfp_mask_id_table *mask_entry;
+ unsigned long hash_key;
+ u8 mask_id;
+
+ if (nfp_mask_alloc(app, &mask_id))
+ return -ENOENT;
+
+ mask_entry = kmalloc(sizeof(*mask_entry), GFP_KERNEL);
+ if (!mask_entry) {
+ nfp_release_mask_id(app, mask_id);
+ return -ENOMEM;
+ }
+
+ INIT_HLIST_NODE(&mask_entry->link);
+ mask_entry->mask_id = mask_id;
+ hash_key = jhash(mask_data, mask_len, priv->mask_id_seed);
+ mask_entry->hash_key = hash_key;
+ mask_entry->ref_cnt = 1;
+ hash_add(priv->mask_table, &mask_entry->link, hash_key);
+
+ return mask_id;
+}
+
+static struct nfp_mask_id_table *
+nfp_search_mask_table(struct nfp_app *app, char *mask_data, u32 mask_len)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ struct nfp_mask_id_table *mask_entry;
+ unsigned long hash_key;
+
+ hash_key = jhash(mask_data, mask_len, priv->mask_id_seed);
+
+ hash_for_each_possible(priv->mask_table, mask_entry, link, hash_key)
+ if (mask_entry->hash_key == hash_key)
+ return mask_entry;
+
+ return NULL;
+}
+
+static int
+nfp_find_in_mask_table(struct nfp_app *app, char *mask_data, u32 mask_len)
+{
+ struct nfp_mask_id_table *mask_entry;
+
+ mask_entry = nfp_search_mask_table(app, mask_data, mask_len);
+ if (!mask_entry)
+ return -ENOENT;
+
+ mask_entry->ref_cnt++;
+
+ /* Return the u8 mask_id as an int so negative values can signal errors. */
+ return mask_entry->mask_id;
+}
+
+static bool
+nfp_check_mask_add(struct nfp_app *app, char *mask_data, u32 mask_len,
+ u8 *meta_flags, u8 *mask_id)
+{
+ int id;
+
+ id = nfp_find_in_mask_table(app, mask_data, mask_len);
+ if (id < 0) {
+ id = nfp_add_mask_table(app, mask_data, mask_len);
+ if (id < 0)
+ return false;
+ *meta_flags |= NFP_FL_META_FLAG_NEW_MASK;
+ }
+ *mask_id = id;
+
+ return true;
+}
+
+static bool
+nfp_check_mask_remove(struct nfp_app *app, char *mask_data, u32 mask_len,
+ u8 *meta_flags, u8 *mask_id)
+{
+ struct nfp_mask_id_table *mask_entry;
+
+ mask_entry = nfp_search_mask_table(app, mask_data, mask_len);
+ if (!mask_entry)
+ return false;
+
+ *mask_id = mask_entry->mask_id;
+ mask_entry->ref_cnt--;
+ if (!mask_entry->ref_cnt) {
+ hash_del(&mask_entry->link);
+ nfp_release_mask_id(app, *mask_id);
+ kfree(mask_entry);
+ if (meta_flags)
+ *meta_flags |= NFP_FL_META_FLAG_LAST_MASK;
+ }
+
+ return true;
+}
+
+int nfp_compile_flow_metadata(struct nfp_app *app,
+ struct tc_cls_flower_offload *flow,
+ struct nfp_fl_payload *nfp_flow)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ struct nfp_fl_payload *check_entry;
+ u8 new_mask_id;
+ u32 stats_cxt;
+
+ if (nfp_get_stats_entry(app, &stats_cxt))
+ return -ENOENT;
+
+ nfp_flow->meta.host_ctx_id = cpu_to_be32(stats_cxt);
+ nfp_flow->meta.host_cookie = cpu_to_be64(flow->cookie);
+
+ new_mask_id = 0;
+ if (!nfp_check_mask_add(app, nfp_flow->mask_data,
+ nfp_flow->meta.mask_len,
+ &nfp_flow->meta.flags, &new_mask_id)) {
+ if (nfp_release_stats_entry(app, stats_cxt))
+ return -EINVAL;
+ return -ENOENT;
+ }
+
+ nfp_flow->meta.flow_version = cpu_to_be64(priv->flower_version);
+ priv->flower_version++;
+
+ /* Update flow payload with mask ids. */
+ nfp_flow->unmasked_data[NFP_FL_MASK_ID_LOCATION] = new_mask_id;
+ nfp_flow->stats.pkts = 0;
+ nfp_flow->stats.bytes = 0;
+ nfp_flow->stats.used = jiffies;
+
+ check_entry = nfp_flower_search_fl_table(app, flow->cookie);
+ if (check_entry) {
+ if (nfp_release_stats_entry(app, stats_cxt))
+ return -EINVAL;
+
+ if (!nfp_check_mask_remove(app, nfp_flow->mask_data,
+ nfp_flow->meta.mask_len,
+ NULL, &new_mask_id))
+ return -EINVAL;
+
+ return -EEXIST;
+ }
+
+ return 0;
+}
+
+int nfp_modify_flow_metadata(struct nfp_app *app,
+ struct nfp_fl_payload *nfp_flow)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ u8 new_mask_id = 0;
+ u32 temp_ctx_id;
+
+ nfp_check_mask_remove(app, nfp_flow->mask_data,
+ nfp_flow->meta.mask_len, &nfp_flow->meta.flags,
+ &new_mask_id);
+
+ nfp_flow->meta.flow_version = cpu_to_be64(priv->flower_version);
+ priv->flower_version++;
+
+ /* Update flow payload with mask ids. */
+ nfp_flow->unmasked_data[NFP_FL_MASK_ID_LOCATION] = new_mask_id;
+
+ /* Release the stats ctx id. */
+ temp_ctx_id = be32_to_cpu(nfp_flow->meta.host_ctx_id);
+
+ return nfp_release_stats_entry(app, temp_ctx_id);
+}
+
+int nfp_flower_metadata_init(struct nfp_app *app)
+{
+ struct nfp_flower_priv *priv = app->priv;
+
+ hash_init(priv->mask_table);
+ hash_init(priv->flow_table);
+ get_random_bytes(&priv->mask_id_seed, sizeof(priv->mask_id_seed));
+
+ /* Init ring buffer and unallocated mask_ids. */
+ priv->mask_ids.mask_id_free_list.buf =
+ kmalloc_array(NFP_FLOWER_MASK_ENTRY_RS,
+ NFP_FLOWER_MASK_ELEMENT_RS, GFP_KERNEL);
+ if (!priv->mask_ids.mask_id_free_list.buf)
+ return -ENOMEM;
+
+ priv->mask_ids.init_unallocated = NFP_FLOWER_MASK_ENTRY_RS - 1;
+
+ /* Init timestamps for mask id. */
+ priv->mask_ids.last_used =
+ kmalloc_array(NFP_FLOWER_MASK_ENTRY_RS,
+ sizeof(*priv->mask_ids.last_used), GFP_KERNEL);
+ if (!priv->mask_ids.last_used)
+ goto err_free_mask_id;
+
+ /* Init ring buffer and unallocated stats_ids. */
+ priv->stats_ids.free_list.buf =
+ vmalloc(NFP_FL_STATS_ENTRY_RS * NFP_FL_STATS_ELEM_RS);
+ if (!priv->stats_ids.free_list.buf)
+ goto err_free_last_used;
+
+ priv->stats_ids.init_unalloc = NFP_FL_REPEATED_HASH_MAX;
+
+ return 0;
+
+err_free_last_used:
+ kfree(priv->mask_ids.last_used);
+err_free_mask_id:
+ kfree(priv->mask_ids.mask_id_free_list.buf);
+ return -ENOMEM;
+}
+
+void nfp_flower_metadata_cleanup(struct nfp_app *app)
+{
+ struct nfp_flower_priv *priv = app->priv;
+
+ if (!priv)
+ return;
+
+ kfree(priv->mask_ids.mask_id_free_list.buf);
+ kfree(priv->mask_ids.last_used);
+ vfree(priv->stats_ids.free_list.buf);
+}
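
Reviewer note: both free lists above (mask ids and stats contexts) use the same scheme, a circular buffer of recyclable ids that is consulted only after the initial pool of never-allocated ids runs out. A minimal sketch of that allocator, assuming illustrative sizes rather than the NFP_FL_STATS_* / NFP_FLOWER_MASK_* constants:

#include <stdint.h>
#include <stdio.h>

#define N_IDS 8                 /* illustrative pool size */
#define RING_SZ (N_IDS + 1)     /* spare slot so all ids can be free at once */

struct id_ring {
    uint32_t buf[RING_SZ];      /* circular free list */
    unsigned int head, tail;    /* write and read cursors */
    unsigned int init_unalloc;  /* ids never handed out yet */
};

static int id_alloc(struct id_ring *r, uint32_t *id)
{
    if (r->init_unalloc) {      /* fresh ids first, no ring traffic */
        *id = --r->init_unalloc;
        return 0;
    }
    if (r->head == r->tail)     /* ring empty: every id is in use */
        return -1;
    *id = r->buf[r->tail];
    r->tail = (r->tail + 1) % RING_SZ;
    return 0;
}

static int id_free(struct id_ring *r, uint32_t id)
{
    unsigned int next = (r->head + 1) % RING_SZ;

    if (next == r->tail)        /* ring full: likely a double free */
        return -1;
    r->buf[r->head] = id;
    r->head = next;
    return 0;
}

int main(void)
{
    struct id_ring r = { .init_unalloc = N_IDS };
    uint32_t id;

    id_alloc(&r, &id);          /* highest fresh id: N_IDS - 1 */
    id_free(&r, id);            /* recycle it through the ring */
    while (!id_alloc(&r, &id))  /* drain fresh ids, then the ring */
        ;
    printf("pool exhausted after %u ids\n", N_IDS);
    return 0;
}

The driver's version differs mainly in that elements are memcpy'd at a fixed byte width into the ring, and the mask variant additionally timestamps each release so an id is not reused before NFP_FL_MASK_REUSE_TIME_NS has elapsed.
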
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
new file mode 100644
index 000000000000..4ad10bd5e139
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -0,0 +1,403 @@
+/*
+ * Copyright (C) 2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/skbuff.h>
+#include <net/devlink.h>
+#include <net/pkt_cls.h>
+
+#include "cmsg.h"
+#include "main.h"
+#include "../nfpcore/nfp_cpp.h"
+#include "../nfpcore/nfp_nsp.h"
+#include "../nfp_app.h"
+#include "../nfp_main.h"
+#include "../nfp_net.h"
+#include "../nfp_port.h"
+
+static int
+nfp_flower_xmit_flow(struct net_device *netdev,
+ struct nfp_fl_payload *nfp_flow, u8 mtype)
+{
+ u32 meta_len, key_len, mask_len, act_len, tot_len;
+ struct nfp_repr *priv = netdev_priv(netdev);
+ struct sk_buff *skb;
+ unsigned char *msg;
+
+ meta_len = sizeof(struct nfp_fl_rule_metadata);
+ key_len = nfp_flow->meta.key_len;
+ mask_len = nfp_flow->meta.mask_len;
+ act_len = nfp_flow->meta.act_len;
+
+ tot_len = meta_len + key_len + mask_len + act_len;
+
+ /* Convert to long words as firmware expects
+ * lengths in units of NFP_FL_LW_SIZ.
+ */
+ nfp_flow->meta.key_len >>= NFP_FL_LW_SIZ;
+ nfp_flow->meta.mask_len >>= NFP_FL_LW_SIZ;
+ nfp_flow->meta.act_len >>= NFP_FL_LW_SIZ;
+
+ skb = nfp_flower_cmsg_alloc(priv->app, tot_len, mtype);
+ if (!skb)
+ return -ENOMEM;
+
+ msg = nfp_flower_cmsg_get_data(skb);
+ memcpy(msg, &nfp_flow->meta, meta_len);
+ memcpy(&msg[meta_len], nfp_flow->unmasked_data, key_len);
+ memcpy(&msg[meta_len + key_len], nfp_flow->mask_data, mask_len);
+ memcpy(&msg[meta_len + key_len + mask_len],
+ nfp_flow->action_data, act_len);
+
+ /* Convert back to bytes as software expects
+ * lengths in units of bytes.
+ */
+ nfp_flow->meta.key_len <<= NFP_FL_LW_SIZ;
+ nfp_flow->meta.mask_len <<= NFP_FL_LW_SIZ;
+ nfp_flow->meta.act_len <<= NFP_FL_LW_SIZ;
+
+ nfp_ctrl_tx(priv->app->ctrl, skb);
+
+ return 0;
+}
+
+static bool nfp_flower_check_higher_than_mac(struct tc_cls_flower_offload *f)
+{
+ return dissector_uses_key(f->dissector,
+ FLOW_DISSECTOR_KEY_IPV4_ADDRS) ||
+ dissector_uses_key(f->dissector,
+ FLOW_DISSECTOR_KEY_IPV6_ADDRS) ||
+ dissector_uses_key(f->dissector,
+ FLOW_DISSECTOR_KEY_PORTS) ||
+ dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ICMP);
+}
+
+static int
+nfp_flower_calculate_key_layers(struct nfp_fl_key_ls *ret_key_ls,
+ struct tc_cls_flower_offload *flow)
+{
+ struct flow_dissector_key_control *mask_enc_ctl;
+ struct flow_dissector_key_basic *mask_basic;
+ struct flow_dissector_key_basic *key_basic;
+ u32 key_layer_two;
+ u8 key_layer;
+ int key_size;
+
+ mask_enc_ctl = skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_ENC_CONTROL,
+ flow->mask);
+
+ mask_basic = skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_BASIC,
+ flow->mask);
+
+ key_basic = skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_BASIC,
+ flow->key);
+ key_layer_two = 0;
+ key_layer = NFP_FLOWER_LAYER_PORT | NFP_FLOWER_LAYER_MAC;
+ key_size = sizeof(struct nfp_flower_meta_one) +
+ sizeof(struct nfp_flower_in_port) +
+ sizeof(struct nfp_flower_mac_mpls);
+
+ /* A tunnel match is requested. Tunnel offload is not yet supported. */
+ if (mask_enc_ctl->addr_type)
+ return -EOPNOTSUPP;
+
+ if (mask_basic->n_proto) {
+ /* Ethernet type is present in the key. */
+ switch (key_basic->n_proto) {
+ case cpu_to_be16(ETH_P_IP):
+ key_layer |= NFP_FLOWER_LAYER_IPV4;
+ key_size += sizeof(struct nfp_flower_ipv4);
+ break;
+
+ case cpu_to_be16(ETH_P_IPV6):
+ key_layer |= NFP_FLOWER_LAYER_IPV6;
+ key_size += sizeof(struct nfp_flower_ipv6);
+ break;
+
+ /* Currently we do not offload ARP
+ * because we rely on it to get to the host.
+ */
+ case cpu_to_be16(ETH_P_ARP):
+ return -EOPNOTSUPP;
+
+ /* Will be included in layer 2. */
+ case cpu_to_be16(ETH_P_8021Q):
+ break;
+
+ default:
+ /* Other ethtype - we need to check the masks for the
+ * remainder of the key to ensure we can offload.
+ */
+ if (nfp_flower_check_higher_than_mac(flow))
+ return -EOPNOTSUPP;
+ break;
+ }
+ }
+
+ if (mask_basic->ip_proto) {
+ /* IP protocol is present in the key. */
+ switch (key_basic->ip_proto) {
+ case IPPROTO_TCP:
+ case IPPROTO_UDP:
+ case IPPROTO_SCTP:
+ case IPPROTO_ICMP:
+ case IPPROTO_ICMPV6:
+ key_layer |= NFP_FLOWER_LAYER_TP;
+ key_size += sizeof(struct nfp_flower_tp_ports);
+ break;
+ default:
+ /* Other ip proto - we need to check the masks for the
+ * remainder of the key to ensure we can offload.
+ */
+ return -EOPNOTSUPP;
+ }
+ }
+
+ ret_key_ls->key_layer = key_layer;
+ ret_key_ls->key_layer_two = key_layer_two;
+ ret_key_ls->key_size = key_size;
+
+ return 0;
+}
+
+static struct nfp_fl_payload *
+nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer)
+{
+ struct nfp_fl_payload *flow_pay;
+
+ flow_pay = kmalloc(sizeof(*flow_pay), GFP_KERNEL);
+ if (!flow_pay)
+ return NULL;
+
+ flow_pay->meta.key_len = key_layer->key_size;
+ flow_pay->unmasked_data = kmalloc(key_layer->key_size, GFP_KERNEL);
+ if (!flow_pay->unmasked_data)
+ goto err_free_flow;
+
+ flow_pay->meta.mask_len = key_layer->key_size;
+ flow_pay->mask_data = kmalloc(key_layer->key_size, GFP_KERNEL);
+ if (!flow_pay->mask_data)
+ goto err_free_unmasked;
+
+ flow_pay->action_data = kmalloc(NFP_FL_MAX_A_SIZ, GFP_KERNEL);
+ if (!flow_pay->action_data)
+ goto err_free_mask;
+
+ flow_pay->meta.flags = 0;
+ spin_lock_init(&flow_pay->lock);
+
+ return flow_pay;
+
+err_free_mask:
+ kfree(flow_pay->mask_data);
+err_free_unmasked:
+ kfree(flow_pay->unmasked_data);
+err_free_flow:
+ kfree(flow_pay);
+ return NULL;
+}
+
+/**
+ * nfp_flower_add_offload() - Adds a new flow to hardware.
+ * @app: Pointer to the APP handle
+ * @netdev: netdev structure.
+ * @flow: TC flower classifier offload structure.
+ *
+ * Adds a new flow to the repeated hash structure and action payload.
+ *
+ * Return: negative value on error, 0 if configured successfully.
+ */
+static int
+nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
+ struct tc_cls_flower_offload *flow)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ struct nfp_fl_payload *flow_pay;
+ struct nfp_fl_key_ls *key_layer;
+ int err;
+
+ key_layer = kmalloc(sizeof(*key_layer), GFP_KERNEL);
+ if (!key_layer)
+ return -ENOMEM;
+
+ err = nfp_flower_calculate_key_layers(key_layer, flow);
+ if (err)
+ goto err_free_key_ls;
+
+ flow_pay = nfp_flower_allocate_new(key_layer);
+ if (!flow_pay) {
+ err = -ENOMEM;
+ goto err_free_key_ls;
+ }
+
+ err = nfp_flower_compile_flow_match(flow, key_layer, netdev, flow_pay);
+ if (err)
+ goto err_destroy_flow;
+
+ err = nfp_flower_compile_action(flow, netdev, flow_pay);
+ if (err)
+ goto err_destroy_flow;
+
+ err = nfp_compile_flow_metadata(app, flow, flow_pay);
+ if (err)
+ goto err_destroy_flow;
+
+ err = nfp_flower_xmit_flow(netdev, flow_pay,
+ NFP_FLOWER_CMSG_TYPE_FLOW_ADD);
+ if (err)
+ goto err_destroy_flow;
+
+ INIT_HLIST_NODE(&flow_pay->link);
+ flow_pay->tc_flower_cookie = flow->cookie;
+ hash_add_rcu(priv->flow_table, &flow_pay->link, flow->cookie);
+
+ /* Key layer info is only needed at compile time; the flow payload itself is freed when the flower rule is destroyed. */
+ kfree(key_layer);
+
+ return 0;
+
+err_destroy_flow:
+ kfree(flow_pay->action_data);
+ kfree(flow_pay->mask_data);
+ kfree(flow_pay->unmasked_data);
+ kfree(flow_pay);
+err_free_key_ls:
+ kfree(key_layer);
+ return err;
+}
+
+/**
+ * nfp_flower_del_offload() - Removes a flow from hardware.
+ * @app: Pointer to the APP handle
+ * @netdev: netdev structure.
+ * @flow: TC flower classifier offload structure
+ *
+ * Removes a flow from the repeated hash structure and clears the
+ * action payload.
+ *
+ * Return: negative value on error, 0 if removed successfully.
+ */
+static int
+nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
+ struct tc_cls_flower_offload *flow)
+{
+ struct nfp_fl_payload *nfp_flow;
+ int err;
+
+ nfp_flow = nfp_flower_search_fl_table(app, flow->cookie);
+ if (!nfp_flow)
+ return -ENOENT;
+
+ err = nfp_modify_flow_metadata(app, nfp_flow);
+ if (err)
+ goto err_free_flow;
+
+ err = nfp_flower_xmit_flow(netdev, nfp_flow,
+ NFP_FLOWER_CMSG_TYPE_FLOW_DEL);
+ if (err)
+ goto err_free_flow;
+
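+ /* Fall through on success: the flow is removed from the local table
+ * and freed in either case; err reports the result to TC.
+ */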
+err_free_flow:
+ hash_del_rcu(&nfp_flow->link);
+ kfree(nfp_flow->action_data);
+ kfree(nfp_flow->mask_data);
+ kfree(nfp_flow->unmasked_data);
+ kfree_rcu(nfp_flow, rcu);
+ return err;
+}
+
+/**
+ * nfp_flower_get_stats() - Populates flow stats obtained from hardware.
+ * @app: Pointer to the APP handle
+ * @flow: TC flower classifier offload structure
+ *
+ * Populates a flow statistics structure which corresponds to a
+ * specific flow.
+ *
+ * Return: negative value on error, 0 if stats populated successfully.
+ */
+static int
+nfp_flower_get_stats(struct nfp_app *app, struct tc_cls_flower_offload *flow)
+{
+ struct nfp_fl_payload *nfp_flow;
+
+ nfp_flow = nfp_flower_search_fl_table(app, flow->cookie);
+ if (!nfp_flow)
+ return -EINVAL;
+
+ spin_lock_bh(&nfp_flow->lock);
+ tcf_exts_stats_update(flow->exts, nfp_flow->stats.bytes,
+ nfp_flow->stats.pkts, nfp_flow->stats.used);
+
+ nfp_flow->stats.pkts = 0;
+ nfp_flow->stats.bytes = 0;
+ spin_unlock_bh(&nfp_flow->lock);
+
+ return 0;
+}
+
+static int
+nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev,
+ struct tc_cls_flower_offload *flower)
+{
+ switch (flower->command) {
+ case TC_CLSFLOWER_REPLACE:
+ return nfp_flower_add_offload(app, netdev, flower);
+ case TC_CLSFLOWER_DESTROY:
+ return nfp_flower_del_offload(app, netdev, flower);
+ case TC_CLSFLOWER_STATS:
+ return nfp_flower_get_stats(app, flower);
+ }
+
+ return -EOPNOTSUPP;
+}
+
+int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev,
+ u32 handle, __be16 proto, struct tc_to_netdev *tc)
+{
+ if (TC_H_MAJ(handle) != TC_H_MAJ(TC_H_INGRESS))
+ return -EOPNOTSUPP;
+
+ if (!eth_proto_is_802_3(proto))
+ return -EOPNOTSUPP;
+
+ if (tc->type != TC_SETUP_CLSFLOWER)
+ return -EINVAL;
+
+ return nfp_flower_repr_offload(app, netdev, tc->cls_flower);
+}
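
Reviewer note: nfp_flower_calculate_key_layers() above makes one pass over the dissector to decide which layers the key needs and how large the key/mask buffers must be, before anything is allocated; allocation and compilation are then driven by the same description. A compact sketch of that describe-then-allocate idea (flag values and sizes are illustrative, not the NFP_FLOWER_LAYER_* encoding):

#include <stdint.h>
#include <stdio.h>

#define LAYER_PORT 0x01
#define LAYER_MAC  0x02
#define LAYER_TP   0x04
#define LAYER_IPV4 0x08

struct key_ls { uint8_t layers; int size; };

/* Hypothetical per-layer sizes standing in for the nfp_flower_* structs. */
static const struct { uint8_t flag; int size; } layer_tbl[] = {
    { LAYER_PORT, 4 }, { LAYER_MAC, 16 }, { LAYER_TP, 4 }, { LAYER_IPV4, 16 },
};

static void key_ls_build(struct key_ls *ls, uint8_t wanted)
{
    unsigned int i;

    ls->layers = 0;
    ls->size = 0;
    for (i = 0; i < sizeof(layer_tbl) / sizeof(layer_tbl[0]); i++) {
        if (wanted & layer_tbl[i].flag) {
            ls->layers |= layer_tbl[i].flag;
            ls->size += layer_tbl[i].size;
        }
    }
}

int main(void)
{
    struct key_ls ls;

    key_ls_build(&ls, LAYER_PORT | LAYER_MAC | LAYER_IPV4);
    printf("layers=0x%x size=%d\n", ls.layers, ls.size); /* layers=0xb size=36 */
    return 0;
}

Rejecting unsupported matches (tunnels, ARP, unknown IP protocols) at this stage keeps the later compile step free of error paths that would require unwinding half-built keys.
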
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.c b/drivers/net/ethernet/netronome/nfp/nfp_app.c
new file mode 100644
index 000000000000..c704c022574f
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfp_app.c
@@ -0,0 +1,126 @@
+/*
+ * Copyright (C) 2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+
+#include "nfpcore/nfp_cpp.h"
+#include "nfpcore/nfp_nffw.h"
+#include "nfp_app.h"
+#include "nfp_main.h"
+#include "nfp_net_repr.h"
+
+static const struct nfp_app_type *apps[] = {
+ &app_nic,
+ &app_bpf,
+#ifdef CONFIG_NFP_APP_FLOWER
+ &app_flower,
+#endif
+};
+
+const char *nfp_app_mip_name(struct nfp_app *app)
+{
+ if (!app || !app->pf->mip)
+ return "";
+ return nfp_mip_name(app->pf->mip);
+}
+
+struct sk_buff *
+nfp_app_ctrl_msg_alloc(struct nfp_app *app, unsigned int size, gfp_t priority)
+{
+ struct sk_buff *skb;
+
+ if (nfp_app_ctrl_has_meta(app))
+ size += 8;
+
+ skb = alloc_skb(size, priority);
+ if (!skb)
+ return NULL;
+
+ if (nfp_app_ctrl_has_meta(app))
+ skb_reserve(skb, 8);
+
+ return skb;
+}
+
+struct nfp_reprs *
+nfp_app_reprs_set(struct nfp_app *app, enum nfp_repr_type type,
+ struct nfp_reprs *reprs)
+{
+ struct nfp_reprs *old;
+
+ old = rcu_dereference_protected(app->reprs[type],
+ lockdep_is_held(&app->pf->lock));
+ if (reprs && old) {
+ old = ERR_PTR(-EBUSY);
+ goto exit_unlock;
+ }
+
+ rcu_assign_pointer(app->reprs[type], reprs);
+
+exit_unlock:
+ return old;
+}
+
+struct nfp_app *nfp_app_alloc(struct nfp_pf *pf, enum nfp_app_id id)
+{
+ struct nfp_app *app;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(apps); i++)
+ if (apps[i]->id == id)
+ break;
+ if (i == ARRAY_SIZE(apps)) {
+ nfp_err(pf->cpp, "failed to find app with ID 0x%02hhx\n", id);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (WARN_ON(!apps[i]->name || !apps[i]->vnic_init))
+ return ERR_PTR(-EINVAL);
+
+ app = kzalloc(sizeof(*app), GFP_KERNEL);
+ if (!app)
+ return ERR_PTR(-ENOMEM);
+
+ app->pf = pf;
+ app->cpp = pf->cpp;
+ app->pdev = pf->pdev;
+ app->type = apps[i];
+
+ return app;
+}
+
+void nfp_app_free(struct nfp_app *app)
+{
+ kfree(app);
+}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.h b/drivers/net/ethernet/netronome/nfp/nfp_app.h
new file mode 100644
index 000000000000..5d714e10d9a9
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfp_app.h
@@ -0,0 +1,313 @@
+/*
+ * Copyright (C) 2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _NFP_APP_H
+#define _NFP_APP_H 1
+
+#include <net/devlink.h>
+
+#include "nfp_net_repr.h"
+
+struct bpf_prog;
+struct net_device;
+struct pci_dev;
+struct sk_buff;
+struct tc_to_netdev;
+struct nfp_app;
+struct nfp_cpp;
+struct nfp_pf;
+struct nfp_repr;
+struct nfp_net;
+
+enum nfp_app_id {
+ NFP_APP_CORE_NIC = 0x1,
+ NFP_APP_BPF_NIC = 0x2,
+ NFP_APP_FLOWER_NIC = 0x3,
+};
+
+extern const struct nfp_app_type app_nic;
+extern const struct nfp_app_type app_bpf;
+extern const struct nfp_app_type app_flower;
+
+/**
+ * struct nfp_app_type - application definition
+ * @id: application ID
+ * @name: application name
+ * @ctrl_has_meta: control messages have prepend of type:5/port:CTRL
+ *
+ * Callbacks
+ * @init: perform basic app checks and init
+ * @clean: clean app state
+ * @extra_cap: extra capabilities string
+ * @vnic_init: init vNICs (assign port types, etc.)
+ * @vnic_clean: clean up app's vNIC state
+ * @repr_open: representor netdev open callback
+ * @repr_stop: representor netdev stop callback
+ * @start: start application logic
+ * @stop: stop application logic
+ * @ctrl_msg_rx: control message handler
+ * @setup_tc: setup TC ndo
+ * @tc_busy: TC HW offload busy (rules loaded)
+ * @xdp_offload: offload an XDP program
+ * @eswitch_mode_get: get SR-IOV eswitch mode
+ * @sriov_enable: app-specific sriov initialisation
+ * @sriov_disable: app-specific sriov clean-up
+ * @repr_get: get representor netdev
+ */
+struct nfp_app_type {
+ enum nfp_app_id id;
+ const char *name;
+
+ bool ctrl_has_meta;
+
+ int (*init)(struct nfp_app *app);
+ void (*clean)(struct nfp_app *app);
+
+ const char *(*extra_cap)(struct nfp_app *app, struct nfp_net *nn);
+
+ int (*vnic_init)(struct nfp_app *app, struct nfp_net *nn,
+ unsigned int id);
+ void (*vnic_clean)(struct nfp_app *app, struct nfp_net *nn);
+
+ int (*repr_open)(struct nfp_app *app, struct nfp_repr *repr);
+ int (*repr_stop)(struct nfp_app *app, struct nfp_repr *repr);
+
+ int (*start)(struct nfp_app *app);
+ void (*stop)(struct nfp_app *app);
+
+ void (*ctrl_msg_rx)(struct nfp_app *app, struct sk_buff *skb);
+
+ int (*setup_tc)(struct nfp_app *app, struct net_device *netdev,
+ u32 handle, __be16 proto, struct tc_to_netdev *tc);
+ bool (*tc_busy)(struct nfp_app *app, struct nfp_net *nn);
+ int (*xdp_offload)(struct nfp_app *app, struct nfp_net *nn,
+ struct bpf_prog *prog);
+
+ int (*sriov_enable)(struct nfp_app *app, int num_vfs);
+ void (*sriov_disable)(struct nfp_app *app);
+
+ enum devlink_eswitch_mode (*eswitch_mode_get)(struct nfp_app *app);
+ struct net_device *(*repr_get)(struct nfp_app *app, u32 id);
+};
+
+/**
+ * struct nfp_app - NFP application container
+ * @pdev: backpointer to PCI device
+ * @pf: backpointer to NFP PF structure
+ * @cpp: pointer to the CPP handle
+ * @ctrl: pointer to ctrl vNIC struct
+ * @reprs: array of pointers to representors
+ * @type: pointer to const application ops and info
+ * @priv: app-specific priv data
+ */
+struct nfp_app {
+ struct pci_dev *pdev;
+ struct nfp_pf *pf;
+ struct nfp_cpp *cpp;
+
+ struct nfp_net *ctrl;
+ struct nfp_reprs __rcu *reprs[NFP_REPR_TYPE_MAX + 1];
+
+ const struct nfp_app_type *type;
+ void *priv;
+};
+
+bool nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb);
+
+static inline int nfp_app_init(struct nfp_app *app)
+{
+ if (!app->type->init)
+ return 0;
+ return app->type->init(app);
+}
+
+static inline void nfp_app_clean(struct nfp_app *app)
+{
+ if (app->type->clean)
+ app->type->clean(app);
+}
+
+static inline int nfp_app_vnic_init(struct nfp_app *app, struct nfp_net *nn,
+ unsigned int id)
+{
+ return app->type->vnic_init(app, nn, id);
+}
+
+static inline void nfp_app_vnic_clean(struct nfp_app *app, struct nfp_net *nn)
+{
+ if (app->type->vnic_clean)
+ app->type->vnic_clean(app, nn);
+}
+
+static inline int nfp_app_repr_open(struct nfp_app *app, struct nfp_repr *repr)
+{
+ if (!app->type->repr_open)
+ return -EINVAL;
+ return app->type->repr_open(app, repr);
+}
+
+static inline int nfp_app_repr_stop(struct nfp_app *app, struct nfp_repr *repr)
+{
+ if (!app->type->repr_stop)
+ return -EINVAL;
+ return app->type->repr_stop(app, repr);
+}
+
+static inline int nfp_app_start(struct nfp_app *app, struct nfp_net *ctrl)
+{
+ app->ctrl = ctrl;
+ if (!app->type->start)
+ return 0;
+ return app->type->start(app);
+}
+
+static inline void nfp_app_stop(struct nfp_app *app)
+{
+ if (!app->type->stop)
+ return;
+ app->type->stop(app);
+}
+
+static inline const char *nfp_app_name(struct nfp_app *app)
+{
+ if (!app)
+ return "";
+ return app->type->name;
+}
+
+static inline bool nfp_app_needs_ctrl_vnic(struct nfp_app *app)
+{
+ return app && app->type->ctrl_msg_rx;
+}
+
+static inline bool nfp_app_ctrl_has_meta(struct nfp_app *app)
+{
+ return app->type->ctrl_has_meta;
+}
+
+static inline const char *nfp_app_extra_cap(struct nfp_app *app,
+ struct nfp_net *nn)
+{
+ if (!app || !app->type->extra_cap)
+ return "";
+ return app->type->extra_cap(app, nn);
+}
+
+static inline bool nfp_app_has_tc(struct nfp_app *app)
+{
+ return app && app->type->setup_tc;
+}
+
+static inline bool nfp_app_tc_busy(struct nfp_app *app, struct nfp_net *nn)
+{
+ if (!app || !app->type->tc_busy)
+ return false;
+ return app->type->tc_busy(app, nn);
+}
+
+static inline int nfp_app_setup_tc(struct nfp_app *app,
+ struct net_device *netdev,
+ u32 handle, __be16 proto,
+ struct tc_to_netdev *tc)
+{
+ if (!app || !app->type->setup_tc)
+ return -EOPNOTSUPP;
+ return app->type->setup_tc(app, netdev, handle, proto, tc);
+}
+
+static inline int nfp_app_xdp_offload(struct nfp_app *app, struct nfp_net *nn,
+ struct bpf_prog *prog)
+{
+ if (!app || !app->type->xdp_offload)
+ return -EOPNOTSUPP;
+ return app->type->xdp_offload(app, nn, prog);
+}
+
+static inline bool nfp_app_ctrl_tx(struct nfp_app *app, struct sk_buff *skb)
+{
+ return nfp_ctrl_tx(app->ctrl, skb);
+}
+
+static inline void nfp_app_ctrl_rx(struct nfp_app *app, struct sk_buff *skb)
+{
+ app->type->ctrl_msg_rx(app, skb);
+}
+
+static inline int nfp_app_eswitch_mode_get(struct nfp_app *app, u16 *mode)
+{
+ if (!app->type->eswitch_mode_get)
+ return -EOPNOTSUPP;
+
+ *mode = app->type->eswitch_mode_get(app);
+
+ return 0;
+}
+
+static inline int nfp_app_sriov_enable(struct nfp_app *app, int num_vfs)
+{
+ if (!app || !app->type->sriov_enable)
+ return -EOPNOTSUPP;
+ return app->type->sriov_enable(app, num_vfs);
+}
+
+static inline void nfp_app_sriov_disable(struct nfp_app *app)
+{
+ if (app && app->type->sriov_disable)
+ app->type->sriov_disable(app);
+}
+
+static inline struct net_device *nfp_app_repr_get(struct nfp_app *app, u32 id)
+{
+ if (unlikely(!app || !app->type->repr_get))
+ return NULL;
+
+ return app->type->repr_get(app, id);
+}
+
+struct nfp_reprs *
+nfp_app_reprs_set(struct nfp_app *app, enum nfp_repr_type type,
+ struct nfp_reprs *reprs);
+
+const char *nfp_app_mip_name(struct nfp_app *app);
+struct sk_buff *
+nfp_app_ctrl_msg_alloc(struct nfp_app *app, unsigned int size, gfp_t priority);
+
+struct nfp_app *nfp_app_alloc(struct nfp_pf *pf, enum nfp_app_id id);
+void nfp_app_free(struct nfp_app *app);
+
+/* Callbacks shared between apps */
+
+int nfp_app_nic_vnic_init(struct nfp_app *app, struct nfp_net *nn,
+ unsigned int id);
+
+#endif
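
Reviewer note: nearly every static inline in this header guards an optional callback the same way: a NULL pointer means the app does not implement the hook, and the wrapper substitutes the natural default (0, a no-op, or -EOPNOTSUPP) so core code never open-codes capability checks. A generic sketch of the pattern (names are illustrative, not the nfp_app API):

#include <errno.h>
#include <stdio.h>

struct ops {
    int (*start)(void *priv);          /* optional */
    void (*stop)(void *priv);          /* optional */
    int (*setup)(void *priv, int arg); /* optional capability */
};

struct obj {
    const struct ops *ops;
    void *priv;
};

/* Missing start: trivially successful. */
static int obj_start(struct obj *o)
{
    return o->ops->start ? o->ops->start(o->priv) : 0;
}

/* Missing stop: nothing to undo. */
static void obj_stop(struct obj *o)
{
    if (o->ops->stop)
        o->ops->stop(o->priv);
}

/* Missing setup: the capability simply is not offered. */
static int obj_setup(struct obj *o, int arg)
{
    if (!o->ops->setup)
        return -EOPNOTSUPP;
    return o->ops->setup(o->priv, arg);
}

int main(void)
{
    static const struct ops bare = { 0 };
    struct obj o = { .ops = &bare, .priv = 0 };

    printf("start=%d setup=%d\n", obj_start(&o), obj_setup(&o, 1));
    obj_stop(&o); /* no-op for an app without the hook */
    return 0;
}

The choice of default encodes policy: start/stop default to success because they are lifecycle hooks, while setup_tc and xdp_offload default to -EOPNOTSUPP because absence means the feature cannot be offered at all.
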
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app_nic.c b/drivers/net/ethernet/netronome/nfp/nfp_app_nic.c
new file mode 100644
index 000000000000..4e37c81f9eaf
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfp_app_nic.c
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "nfpcore/nfp_cpp.h"
+#include "nfpcore/nfp_nsp.h"
+#include "nfp_app.h"
+#include "nfp_main.h"
+#include "nfp_net.h"
+#include "nfp_port.h"
+
+static int
+nfp_app_nic_vnic_init_phy_port(struct nfp_pf *pf, struct nfp_app *app,
+ struct nfp_net *nn, unsigned int id)
+{
+ int err;
+
+ if (!pf->eth_tbl)
+ return 0;
+
+ nn->port = nfp_port_alloc(app, NFP_PORT_PHYS_PORT, nn->dp.netdev);
+ if (IS_ERR(nn->port))
+ return PTR_ERR(nn->port);
+
+ err = nfp_port_init_phy_port(pf, app, nn->port, id);
+ if (err) {
+ nfp_port_free(nn->port);
+ return err;
+ }
+
+ return nn->port->type == NFP_PORT_INVALID;
+}
+
+int nfp_app_nic_vnic_init(struct nfp_app *app, struct nfp_net *nn,
+ unsigned int id)
+{
+ int err;
+
+ err = nfp_app_nic_vnic_init_phy_port(app->pf, app, nn, id);
+ if (err)
+ return err < 0 ? err : 0;
+
+ nfp_net_get_mac_addr(app->pf, nn->port);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_asm.h b/drivers/net/ethernet/netronome/nfp/nfp_asm.h
index 22484b6fd3e8..d2b535739d2b 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_asm.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_asm.h
@@ -34,7 +34,7 @@
#ifndef __NFP_ASM_H__
#define __NFP_ASM_H__ 1
-#include "nfp_bpf.h"
+#include <linux/types.h>
#define REG_NONE 0
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
new file mode 100644
index 000000000000..6c9f29c2e975
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
@@ -0,0 +1,199 @@
+/*
+ * Copyright (C) 2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/rtnetlink.h>
+#include <net/devlink.h>
+
+#include "nfpcore/nfp_nsp.h"
+#include "nfp_app.h"
+#include "nfp_main.h"
+#include "nfp_port.h"
+
+static int
+nfp_devlink_fill_eth_port(struct nfp_port *port,
+ struct nfp_eth_table_port *copy)
+{
+ struct nfp_eth_table_port *eth_port;
+
+ eth_port = __nfp_port_get_eth_port(port);
+ if (!eth_port)
+ return -EINVAL;
+
+ memcpy(copy, eth_port, sizeof(*eth_port));
+
+ return 0;
+}
+
+static int
+nfp_devlink_fill_eth_port_from_id(struct nfp_pf *pf, unsigned int port_index,
+ struct nfp_eth_table_port *copy)
+{
+ struct nfp_port *port;
+
+ port = nfp_port_from_id(pf, NFP_PORT_PHYS_PORT, port_index);
+
+ return nfp_devlink_fill_eth_port(port, copy);
+}
+
+static int
+nfp_devlink_set_lanes(struct nfp_pf *pf, unsigned int idx, unsigned int lanes)
+{
+ struct nfp_nsp *nsp;
+ int ret;
+
+ nsp = nfp_eth_config_start(pf->cpp, idx);
+ if (IS_ERR(nsp))
+ return PTR_ERR(nsp);
+
+ ret = __nfp_eth_set_split(nsp, lanes);
+ if (ret) {
+ nfp_eth_config_cleanup_end(nsp);
+ return ret;
+ }
+
+ ret = nfp_eth_config_commit_end(nsp);
+ if (ret < 0)
+ return ret;
+ if (ret) /* no change */
+ return 0;
+
+ return nfp_net_refresh_port_table_sync(pf);
+}
+
+static int
+nfp_devlink_port_split(struct devlink *devlink, unsigned int port_index,
+ unsigned int count)
+{
+ struct nfp_pf *pf = devlink_priv(devlink);
+ struct nfp_eth_table_port eth_port;
+ int ret;
+
+ if (count < 2)
+ return -EINVAL;
+
+ mutex_lock(&pf->lock);
+
+ rtnl_lock();
+ ret = nfp_devlink_fill_eth_port_from_id(pf, port_index, &eth_port);
+ rtnl_unlock();
+ if (ret)
+ goto out;
+
+ if (eth_port.is_split || eth_port.port_lanes % count) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = nfp_devlink_set_lanes(pf, eth_port.index,
+ eth_port.port_lanes / count);
+out:
+ mutex_unlock(&pf->lock);
+
+ return ret;
+}
+
+static int
+nfp_devlink_port_unsplit(struct devlink *devlink, unsigned int port_index)
+{
+ struct nfp_pf *pf = devlink_priv(devlink);
+ struct nfp_eth_table_port eth_port;
+ int ret;
+
+ mutex_lock(&pf->lock);
+
+ rtnl_lock();
+ ret = nfp_devlink_fill_eth_port_from_id(pf, port_index, &eth_port);
+ rtnl_unlock();
+ if (ret)
+ goto out;
+
+ if (!eth_port.is_split) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = nfp_devlink_set_lanes(pf, eth_port.index, eth_port.port_lanes);
+out:
+ mutex_unlock(&pf->lock);
+
+ return ret;
+}
+
+static int nfp_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
+{
+ struct nfp_pf *pf = devlink_priv(devlink);
+ int ret;
+
+ mutex_lock(&pf->lock);
+ if (!pf->app) {
+ ret = -EBUSY;
+ goto out;
+ }
+ ret = nfp_app_eswitch_mode_get(pf->app, mode);
+out:
+ mutex_unlock(&pf->lock);
+
+ return ret;
+}
+
+const struct devlink_ops nfp_devlink_ops = {
+ .port_split = nfp_devlink_port_split,
+ .port_unsplit = nfp_devlink_port_unsplit,
+ .eswitch_mode_get = nfp_devlink_eswitch_mode_get,
+};
+
+int nfp_devlink_port_register(struct nfp_app *app, struct nfp_port *port)
+{
+ struct nfp_eth_table_port eth_port;
+ struct devlink *devlink;
+ int ret;
+
+ rtnl_lock();
+ ret = nfp_devlink_fill_eth_port(port, &eth_port);
+ rtnl_unlock();
+ if (ret)
+ return ret;
+
+ devlink_port_type_eth_set(&port->dl_port, port->netdev);
+ if (eth_port.is_split)
+ devlink_port_split_set(&port->dl_port, eth_port.label_port);
+
+ devlink = priv_to_devlink(app->pf);
+
+ return devlink_port_register(devlink, &port->dl_port, port->eth_id);
+}
+
+void nfp_devlink_port_unregister(struct nfp_port *port)
+{
+ devlink_port_unregister(&port->dl_port);
+}
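
Reviewer note: before handing a split request to the NSP, the code above only validates the arithmetic: the count must be at least 2, the port must not already be split, and its lanes must divide evenly by the requested count. A standalone sketch of just that check (lane counts are illustrative):

#include <stdbool.h>
#include <stdio.h>

/* Returns lanes per resulting port, or -1 if the request is invalid. */
static int split_lanes(unsigned int port_lanes, bool is_split,
                       unsigned int count)
{
    if (count < 2)          /* splitting into one part is a no-op */
        return -1;
    if (is_split)           /* cannot split an already-split port */
        return -1;
    if (port_lanes % count) /* lanes must divide evenly */
        return -1;
    return port_lanes / count;
}

int main(void)
{
    printf("%d\n", split_lanes(4, false, 2)); /* 2 lanes per port */
    printf("%d\n", split_lanes(4, false, 3)); /* -1: 4 % 3 != 0 */
    return 0;
}

Unsplit is the degenerate case: the same NSP call with the port's full lane count restores the original configuration.
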
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_hwmon.c b/drivers/net/ethernet/netronome/nfp/nfp_hwmon.c
new file mode 100644
index 000000000000..f0dcf45aeec1
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfp_hwmon.c
@@ -0,0 +1,192 @@
+/*
+ * Copyright (C) 2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/hwmon.h>
+
+#include "nfpcore/nfp_cpp.h"
+#include "nfpcore/nfp_nsp.h"
+#include "nfp_main.h"
+
+#define NFP_TEMP_MAX (95 * 1000)
+#define NFP_TEMP_CRIT (105 * 1000)
+
+#define NFP_POWER_MAX (25 * 1000 * 1000)
+
+static int nfp_hwmon_sensor_id(enum hwmon_sensor_types type, int channel)
+{
+ if (type == hwmon_temp)
+ return NFP_SENSOR_CHIP_TEMPERATURE;
+ if (type == hwmon_power)
+ return NFP_SENSOR_ASSEMBLY_POWER + channel;
+ return -EINVAL;
+}
+
+static int
+nfp_hwmon_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
+ int channel, long *val)
+{
+ static const struct {
+ enum hwmon_sensor_types type;
+ u32 attr;
+ long val;
+ } const_vals[] = {
+ { hwmon_temp, hwmon_temp_max, NFP_TEMP_MAX },
+ { hwmon_temp, hwmon_temp_crit, NFP_TEMP_CRIT },
+ { hwmon_power, hwmon_power_max, NFP_POWER_MAX },
+ };
+ struct nfp_pf *pf = dev_get_drvdata(dev);
+ enum nfp_nsp_sensor_id id;
+ int err, i;
+
+ for (i = 0; i < ARRAY_SIZE(const_vals); i++)
+ if (const_vals[i].type == type && const_vals[i].attr == attr) {
+ *val = const_vals[i].val;
+ return 0;
+ }
+
+ err = nfp_hwmon_sensor_id(type, channel);
+ if (err < 0)
+ return err;
+ id = err;
+
+ if (!(pf->nspi->sensor_mask & BIT(id)))
+ return -EOPNOTSUPP;
+
+ if (type == hwmon_temp && attr == hwmon_temp_input)
+ return nfp_hwmon_read_sensor(pf->cpp, id, val);
+ if (type == hwmon_power && attr == hwmon_power_input)
+ return nfp_hwmon_read_sensor(pf->cpp, id, val);
+
+ return -EINVAL;
+}
+
+static umode_t
+nfp_hwmon_is_visible(const void *data, enum hwmon_sensor_types type, u32 attr,
+ int channel)
+{
+ if (type == hwmon_temp) {
+ switch (attr) {
+ case hwmon_temp_input:
+ case hwmon_temp_crit:
+ case hwmon_temp_max:
+ return 0444;
+ }
+ } else if (type == hwmon_power) {
+ switch (attr) {
+ case hwmon_power_input:
+ case hwmon_power_max:
+ return 0444;
+ }
+ }
+ return 0;
+}
+
+static u32 nfp_chip_config[] = {
+ HWMON_C_REGISTER_TZ,
+ 0
+};
+
+static const struct hwmon_channel_info nfp_chip = {
+ .type = hwmon_chip,
+ .config = nfp_chip_config,
+};
+
+static u32 nfp_temp_config[] = {
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_CRIT,
+ 0
+};
+
+static const struct hwmon_channel_info nfp_temp = {
+ .type = hwmon_temp,
+ .config = nfp_temp_config,
+};
+
+static u32 nfp_power_config[] = {
+ HWMON_P_INPUT | HWMON_P_MAX,
+ HWMON_P_INPUT,
+ HWMON_P_INPUT,
+ 0
+};
+
+static const struct hwmon_channel_info nfp_power = {
+ .type = hwmon_power,
+ .config = nfp_power_config,
+};
+
+static const struct hwmon_channel_info *nfp_hwmon_info[] = {
+ &nfp_chip,
+ &nfp_temp,
+ &nfp_power,
+ NULL
+};
+
+static const struct hwmon_ops nfp_hwmon_ops = {
+ .is_visible = nfp_hwmon_is_visible,
+ .read = nfp_hwmon_read,
+};
+
+static const struct hwmon_chip_info nfp_chip_info = {
+ .ops = &nfp_hwmon_ops,
+ .info = nfp_hwmon_info,
+};
+
+int nfp_hwmon_register(struct nfp_pf *pf)
+{
+ if (!IS_REACHABLE(CONFIG_HWMON))
+ return 0;
+
+ if (!pf->nspi) {
+ nfp_warn(pf->cpp, "not registering HWMON (no NSP info)\n");
+ return 0;
+ }
+ if (!pf->nspi->sensor_mask) {
+ nfp_info(pf->cpp,
+ "not registering HWMON (NSP doesn't report sensors)\n");
+ return 0;
+ }
+
+ pf->hwmon_dev = hwmon_device_register_with_info(&pf->pdev->dev, "nfp",
+ pf, &nfp_chip_info,
+ NULL);
+ return PTR_ERR_OR_ZERO(pf->hwmon_dev);
+}
+
+void nfp_hwmon_unregister(struct nfp_pf *pf)
+{
+ if (!IS_REACHABLE(CONFIG_HWMON) || !pf->hwmon_dev)
+ return;
+
+ hwmon_device_unregister(pf->hwmon_dev);
+}
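
Reviewer note: the read handler above answers the static limits (temperature max/crit, power max) from a constant table and only queries the NSP sensors for live input values. A sketch of that table-first dispatch, with made-up values standing in for the NFP_TEMP_* / NFP_POWER_* constants and a stubbed hardware read:

#include <stdio.h>

enum sensor_type { SENSOR_TEMP, SENSOR_POWER };
enum sensor_attr { ATTR_INPUT, ATTR_MAX, ATTR_CRIT };

static const struct {
    enum sensor_type type;
    enum sensor_attr attr;
    long val;
} const_vals[] = {
    { SENSOR_TEMP,  ATTR_MAX,  95 * 1000 },        /* millidegrees C */
    { SENSOR_TEMP,  ATTR_CRIT, 105 * 1000 },
    { SENSOR_POWER, ATTR_MAX,  25 * 1000 * 1000 }, /* microwatts */
};

/* Placeholder for a real device query. */
static long read_hw(enum sensor_type type)
{
    return type == SENSOR_TEMP ? 42000 : 7000000;
}

static long sensor_read(enum sensor_type type, enum sensor_attr attr)
{
    unsigned int i;

    /* Constant limits never touch the hardware. */
    for (i = 0; i < sizeof(const_vals) / sizeof(const_vals[0]); i++)
        if (const_vals[i].type == type && const_vals[i].attr == attr)
            return const_vals[i].val;

    return read_hw(type); /* live values come from the device */
}

int main(void)
{
    printf("temp max = %ld\n", sensor_read(SENSOR_TEMP, ATTR_MAX));
    printf("temp now = %ld\n", sensor_read(SENSOR_TEMP, ATTR_INPUT));
    return 0;
}
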
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c
index dde35dae35c5..d67969d3e484 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c
@@ -41,9 +41,11 @@
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/vermagic.h>
+#include <net/devlink.h>
#include "nfpcore/nfp.h"
#include "nfpcore/nfp_cpp.h"
@@ -52,6 +54,7 @@
#include "nfpcore/nfp6000_pcie.h"
+#include "nfp_app.h"
#include "nfp_main.h"
#include "nfp_net.h"
@@ -71,20 +74,22 @@ static const struct pci_device_id nfp_pci_device_ids[] = {
};
MODULE_DEVICE_TABLE(pci, nfp_pci_device_ids);
-static void nfp_pcie_sriov_read_nfd_limit(struct nfp_pf *pf)
+static int nfp_pcie_sriov_read_nfd_limit(struct nfp_pf *pf)
{
-#ifdef CONFIG_PCI_IOV
int err;
- pf->limit_vfs = nfp_rtsym_read_le(pf->cpp, "nfd_vf_cfg_max_vfs", &err);
+ pf->limit_vfs = nfp_rtsym_read_le(pf->rtbl, "nfd_vf_cfg_max_vfs", &err);
if (!err)
- return;
+ return pci_sriov_set_totalvfs(pf->pdev, pf->limit_vfs);
pf->limit_vfs = ~0;
+ pci_sriov_set_totalvfs(pf->pdev, 0); /* 0 is unset */
/* Allow any setting for backwards compatibility if symbol not found */
- if (err != -ENOENT)
- nfp_warn(pf->cpp, "Warning: VF limit read failed: %d\n", err);
-#endif
+ if (err == -ENOENT)
+ return 0;
+
+ nfp_warn(pf->cpp, "Warning: VF limit read failed: %d\n", err);
+ return err;
}
static int nfp_pcie_sriov_enable(struct pci_dev *pdev, int num_vfs)
@@ -93,23 +98,41 @@ static int nfp_pcie_sriov_enable(struct pci_dev *pdev, int num_vfs)
struct nfp_pf *pf = pci_get_drvdata(pdev);
int err;
+ mutex_lock(&pf->lock);
+
if (num_vfs > pf->limit_vfs) {
nfp_info(pf->cpp, "Firmware limits number of VFs to %u\n",
pf->limit_vfs);
- return -EINVAL;
+ err = -EINVAL;
+ goto err_unlock;
}
err = pci_enable_sriov(pdev, num_vfs);
if (err) {
- dev_warn(&pdev->dev, "Failed to enable PCI sriov: %d\n", err);
- return err;
+ dev_warn(&pdev->dev, "Failed to enable PCI SR-IOV: %d\n", err);
+ goto err_unlock;
+ }
+
+ err = nfp_app_sriov_enable(pf->app, num_vfs);
+ if (err) {
+ dev_warn(&pdev->dev,
+ "App specific PCI SR-IOV configuration failed: %d\n",
+ err);
+ goto err_sriov_disable;
}
pf->num_vfs = num_vfs;
dev_dbg(&pdev->dev, "Created %d VFs.\n", pf->num_vfs);
+ mutex_unlock(&pf->lock);
return num_vfs;
+
+err_sriov_disable:
+ pci_disable_sriov(pdev);
+err_unlock:
+ mutex_unlock(&pf->lock);
+ return err;
#endif
return 0;
}
@@ -119,19 +142,26 @@ static int nfp_pcie_sriov_disable(struct pci_dev *pdev)
#ifdef CONFIG_PCI_IOV
struct nfp_pf *pf = pci_get_drvdata(pdev);
+ mutex_lock(&pf->lock);
+
/* If the VFs are assigned we cannot shut down SR-IOV without
* causing issues, so just leave the hardware available but
* disabled
*/
if (pci_vfs_assigned(pdev)) {
dev_warn(&pdev->dev, "Disabling while VFs assigned - VFs will not be deallocated\n");
+ mutex_unlock(&pf->lock);
return -EPERM;
}
+ nfp_app_sriov_disable(pf->app);
+
pf->num_vfs = 0;
pci_disable_sriov(pdev);
dev_dbg(&pdev->dev, "Removed VFs.\n");
+
+ mutex_unlock(&pf->lock);
#endif
return 0;
}
@@ -166,7 +196,7 @@ nfp_net_fw_find(struct pci_dev *pdev, struct nfp_pf *pf)
return NULL;
}
- fw_model = nfp_hwinfo_lookup(pf->cpp, "assembly.partno");
+ fw_model = nfp_hwinfo_lookup(pf->hwinfo, "assembly.partno");
if (!fw_model) {
dev_err(&pdev->dev, "Error: can't read part number\n");
return NULL;
@@ -253,7 +283,6 @@ exit_release_fw:
static int nfp_nsp_init(struct pci_dev *pdev, struct nfp_pf *pf)
{
- struct nfp_nsp_identify *nspi;
struct nfp_nsp *nsp;
int err;
@@ -270,14 +299,13 @@ static int nfp_nsp_init(struct pci_dev *pdev, struct nfp_pf *pf)
pf->eth_tbl = __nfp_eth_read_ports(pf->cpp, nsp);
- nspi = __nfp_nsp_identify(nsp);
- if (nspi) {
- dev_info(&pdev->dev, "BSP: %s\n", nspi->version);
- kfree(nspi);
- }
+ pf->nspi = __nfp_nsp_identify(nsp);
+ if (pf->nspi)
+ dev_info(&pdev->dev, "BSP: %s\n", pf->nspi->version);
err = nfp_fw_load(pdev, pf, nsp);
if (err < 0) {
+ kfree(pf->nspi);
kfree(pf->eth_tbl);
dev_err(&pdev->dev, "Failed to load FW\n");
goto exit_close_nsp;
@@ -315,6 +343,7 @@ static void nfp_fw_unload(struct nfp_pf *pf)
static int nfp_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *pci_id)
{
+ struct devlink *devlink;
struct nfp_pf *pf;
int err;
@@ -335,15 +364,24 @@ static int nfp_pci_probe(struct pci_dev *pdev,
goto err_pci_disable;
}
- pf = kzalloc(sizeof(*pf), GFP_KERNEL);
- if (!pf) {
+ devlink = devlink_alloc(&nfp_devlink_ops, sizeof(*pf));
+ if (!devlink) {
err = -ENOMEM;
goto err_rel_regions;
}
+ pf = devlink_priv(devlink);
+ INIT_LIST_HEAD(&pf->vnics);
INIT_LIST_HEAD(&pf->ports);
+ mutex_init(&pf->lock);
pci_set_drvdata(pdev, pf);
pf->pdev = pdev;
+ pf->wq = alloc_workqueue("nfp-%s", 0, 2, pci_name(pdev));
+ if (!pf->wq) {
+ err = -ENOMEM;
+ goto err_pci_priv_unset;
+ }
+
pf->cpp = nfp_cpp_from_nfp6000_pcie(pdev);
if (IS_ERR_OR_NULL(pf->cpp)) {
err = PTR_ERR(pf->cpp);
@@ -352,34 +390,72 @@ static int nfp_pci_probe(struct pci_dev *pdev,
goto err_disable_msix;
}
+ pf->hwinfo = nfp_hwinfo_read(pf->cpp);
+
dev_info(&pdev->dev, "Assembly: %s%s%s-%s CPLD: %s\n",
- nfp_hwinfo_lookup(pf->cpp, "assembly.vendor"),
- nfp_hwinfo_lookup(pf->cpp, "assembly.partno"),
- nfp_hwinfo_lookup(pf->cpp, "assembly.serial"),
- nfp_hwinfo_lookup(pf->cpp, "assembly.revision"),
- nfp_hwinfo_lookup(pf->cpp, "cpld.version"));
+ nfp_hwinfo_lookup(pf->hwinfo, "assembly.vendor"),
+ nfp_hwinfo_lookup(pf->hwinfo, "assembly.partno"),
+ nfp_hwinfo_lookup(pf->hwinfo, "assembly.serial"),
+ nfp_hwinfo_lookup(pf->hwinfo, "assembly.revision"),
+ nfp_hwinfo_lookup(pf->hwinfo, "cpld.version"));
+
+ err = devlink_register(devlink, &pdev->dev);
+ if (err)
+ goto err_hwinfo_free;
err = nfp_nsp_init(pdev, pf);
if (err)
- goto err_cpp_free;
+ goto err_devlink_unreg;
- nfp_pcie_sriov_read_nfd_limit(pf);
+ pf->mip = nfp_mip_open(pf->cpp);
+ pf->rtbl = __nfp_rtsym_table_read(pf->cpp, pf->mip);
- err = nfp_net_pci_probe(pf);
+ err = nfp_pcie_sriov_read_nfd_limit(pf);
if (err)
goto err_fw_unload;
+ pf->num_vfs = pci_num_vf(pdev);
+ if (pf->num_vfs > pf->limit_vfs) {
+ dev_err(&pdev->dev,
+ "Error: %d VFs already enabled, but loaded FW can only support %d\n",
+ pf->num_vfs, pf->limit_vfs);
+ err = -EINVAL;
+ goto err_fw_unload;
+ }
+
+ err = nfp_net_pci_probe(pf);
+ if (err)
+ goto err_sriov_unlimit;
+
+ err = nfp_hwmon_register(pf);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to register hwmon info\n");
+ goto err_net_remove;
+ }
+
return 0;
+err_net_remove:
+ nfp_net_pci_remove(pf);
+err_sriov_unlimit:
+ pci_sriov_set_totalvfs(pf->pdev, 0);
err_fw_unload:
+ kfree(pf->rtbl);
+ nfp_mip_close(pf->mip);
if (pf->fw_loaded)
nfp_fw_unload(pf);
kfree(pf->eth_tbl);
-err_cpp_free:
+ kfree(pf->nspi);
+err_devlink_unreg:
+ devlink_unregister(devlink);
+err_hwinfo_free:
+ kfree(pf->hwinfo);
nfp_cpp_free(pf->cpp);
err_disable_msix:
+ destroy_workqueue(pf->wq);
+err_pci_priv_unset:
pci_set_drvdata(pdev, NULL);
- kfree(pf);
+ mutex_destroy(&pf->lock);
+ devlink_free(devlink);
err_rel_regions:
pci_release_regions(pdev);
err_pci_disable:
@@ -391,19 +467,33 @@ err_pci_disable:
static void nfp_pci_remove(struct pci_dev *pdev)
{
struct nfp_pf *pf = pci_get_drvdata(pdev);
+ struct devlink *devlink;
+
+ nfp_hwmon_unregister(pf);
+
+ devlink = priv_to_devlink(pf);
nfp_net_pci_remove(pf);
nfp_pcie_sriov_disable(pdev);
+ pci_sriov_set_totalvfs(pf->pdev, 0);
+
+ devlink_unregister(devlink);
+ kfree(pf->rtbl);
+ nfp_mip_close(pf->mip);
if (pf->fw_loaded)
nfp_fw_unload(pf);
+ destroy_workqueue(pf->wq);
pci_set_drvdata(pdev, NULL);
+ kfree(pf->hwinfo);
nfp_cpp_free(pf->cpp);
kfree(pf->eth_tbl);
- kfree(pf);
+ kfree(pf->nspi);
+ mutex_destroy(&pf->lock);
+ devlink_free(devlink);
pci_release_regions(pdev);
pci_disable_device(pdev);
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.h b/drivers/net/ethernet/netronome/nfp/nfp_main.h
index b57de047b002..6922410806db 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_main.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_main.h
@@ -47,39 +47,66 @@
#include <linux/workqueue.h>
struct dentry;
+struct device;
+struct devlink_ops;
struct pci_dev;
struct nfp_cpp;
struct nfp_cpp_area;
struct nfp_eth_table;
+struct nfp_hwinfo;
+struct nfp_mip;
+struct nfp_net;
+struct nfp_nsp_identify;
+struct nfp_port;
+struct nfp_rtsym_table;
/**
* struct nfp_pf - NFP PF-specific device structure
* @pdev: Backpointer to PCI device
* @cpp: Pointer to the CPP handle
- * @ctrl_area: Pointer to the CPP area for the control BAR
- * @tx_area: Pointer to the CPP area for the TX queues
- * @rx_area: Pointer to the CPP area for the FL/RX queues
- * @irq_entries: Array of MSI-X entries for all ports
+ * @app: Pointer to the APP handle
+ * @data_vnic_bar: Pointer to the CPP area for the data vNICs' BARs
+ * @ctrl_vnic_bar: Pointer to the CPP area for the ctrl vNIC's BAR
+ * @qc_area: Pointer to the CPP area for the queues
+ * @mac_stats_bar: Pointer to the CPP area for the MAC stats
+ * @mac_stats_mem: Pointer to mapped MAC stats area
+ * @vf_cfg_bar: Pointer to the CPP area for the VF configuration BAR
+ * @vf_cfg_mem: Pointer to mapped VF configuration area
+ * @irq_entries: Array of MSI-X entries for all vNICs
* @limit_vfs: Number of VFs supported by firmware (~0 for PCI limit)
* @num_vfs: Number of SR-IOV VFs enabled
* @fw_loaded: Is the firmware loaded?
+ * @ctrl_vnic: Pointer to the control vNIC if available
+ * @mip: MIP handle
+ * @rtbl: RTsym table
+ * @hwinfo: HWInfo table
* @eth_tbl: NSP ETH table
+ * @nspi: NSP identification info
+ * @hwmon_dev: Pointer to hwmon device
* @ddir: Per-device debugfs directory
- * @num_ports: Number of adapter ports app firmware supports
- * @num_netdevs: Number of netdevs spawned
- * @ports: Linked list of port structures (struct nfp_net)
- * @port_lock: Protects @ports, @num_ports, @num_netdevs
+ * @max_data_vnics: Number of data vNICs app firmware supports
+ * @num_vnics: Number of vNICs spawned
+ * @vnics: Linked list of vNIC structures (struct nfp_net)
+ * @ports: Linked list of port structures (struct nfp_port)
+ * @wq: Workqueue for running work items which need to grab @lock
* @port_refresh_work: Work entry for taking netdevs out of service
+ * @lock: Protects all fields which may change after probe
*/
struct nfp_pf {
struct pci_dev *pdev;
struct nfp_cpp *cpp;
- struct nfp_cpp_area *ctrl_area;
- struct nfp_cpp_area *tx_area;
- struct nfp_cpp_area *rx_area;
+ struct nfp_app *app;
+
+ struct nfp_cpp_area *data_vnic_bar;
+ struct nfp_cpp_area *ctrl_vnic_bar;
+ struct nfp_cpp_area *qc_area;
+ struct nfp_cpp_area *mac_stats_bar;
+ u8 __iomem *mac_stats_mem;
+ struct nfp_cpp_area *vf_cfg_bar;
+ u8 __iomem *vf_cfg_mem;
struct msix_entry *irq_entries;
@@ -88,21 +115,42 @@ struct nfp_pf {
bool fw_loaded;
+ struct nfp_net *ctrl_vnic;
+
+ const struct nfp_mip *mip;
+ struct nfp_rtsym_table *rtbl;
+ struct nfp_hwinfo *hwinfo;
struct nfp_eth_table *eth_tbl;
+ struct nfp_nsp_identify *nspi;
+
+ struct device *hwmon_dev;
struct dentry *ddir;
- unsigned int num_ports;
- unsigned int num_netdevs;
+ unsigned int max_data_vnics;
+ unsigned int num_vnics;
+ struct list_head vnics;
struct list_head ports;
+
+ struct workqueue_struct *wq;
struct work_struct port_refresh_work;
- struct mutex port_lock;
+
+ struct mutex lock;
};
extern struct pci_driver nfp_netvf_pci_driver;
+extern const struct devlink_ops nfp_devlink_ops;
+
int nfp_net_pci_probe(struct nfp_pf *pf);
void nfp_net_pci_remove(struct nfp_pf *pf);
+int nfp_hwmon_register(struct nfp_pf *pf);
+void nfp_hwmon_unregister(struct nfp_pf *pf);
+
+void nfp_net_get_mac_addr(struct nfp_pf *pf, struct nfp_port *port);
+
+bool nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb);
+
#endif /* NFP_MAIN_H */
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h
index fcf81b3be830..b1fa77bd708b 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h
@@ -50,15 +50,32 @@
#include "nfp_net_ctrl.h"
-#define nn_err(nn, fmt, args...) netdev_err((nn)->dp.netdev, fmt, ## args)
-#define nn_warn(nn, fmt, args...) netdev_warn((nn)->dp.netdev, fmt, ## args)
-#define nn_info(nn, fmt, args...) netdev_info((nn)->dp.netdev, fmt, ## args)
-#define nn_dbg(nn, fmt, args...) netdev_dbg((nn)->dp.netdev, fmt, ## args)
+#define nn_pr(nn, lvl, fmt, args...) \
+ ({ \
+ struct nfp_net *__nn = (nn); \
+ \
+ if (__nn->dp.netdev) \
+ netdev_printk(lvl, __nn->dp.netdev, fmt, ## args); \
+ else \
+ dev_printk(lvl, __nn->dp.dev, "ctrl: " fmt, ## args); \
+ })
+
+#define nn_err(nn, fmt, args...) nn_pr(nn, KERN_ERR, fmt, ## args)
+#define nn_warn(nn, fmt, args...) nn_pr(nn, KERN_WARNING, fmt, ## args)
+#define nn_info(nn, fmt, args...) nn_pr(nn, KERN_INFO, fmt, ## args)
+#define nn_dbg(nn, fmt, args...) nn_pr(nn, KERN_DEBUG, fmt, ## args)
+
#define nn_dp_warn(dp, fmt, args...) \
- do { \
- if (unlikely(net_ratelimit())) \
- netdev_warn((dp)->netdev, fmt, ## args); \
- } while (0)
+ ({ \
+ struct nfp_net_dp *__dp = (dp); \
+ \
+ if (unlikely(net_ratelimit())) { \
+ if (__dp->netdev) \
+ netdev_warn(__dp->netdev, fmt, ## args); \
+ else \
+ dev_warn(__dp->dev, fmt, ## args); \
+ } \
+ })
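The nn_pr() rework routes messages through netdev_printk() when the vNIC has a netdev and through dev_printk() with a "ctrl: " prefix for netdev-less control vNICs, using a GCC statement expression so the argument is evaluated once. A minimal userspace sketch of the same dispatch, with printf() standing in for the two kernel helpers and a hypothetical struct in place of struct nfp_net_dp:

/* Userspace sketch of the nn_pr() dispatch; printf() stands in for
 * netdev_printk()/dev_printk(). Compile with GCC/Clang (statement
 * expressions and named variadic macro args are GNU extensions).
 */
#include <stdio.h>

struct dp_sketch {
	const char *netdev_name;	/* NULL for a control vNIC */
	const char *dev_name;		/* parent PCI device name */
};

#define nn_pr_sketch(dp, fmt, args...)					\
	({								\
		const struct dp_sketch *__dp = (dp);			\
									\
		if (__dp->netdev_name)					\
			printf("%s: " fmt, __dp->netdev_name, ##args);	\
		else							\
			printf("%s: ctrl: " fmt, __dp->dev_name, ##args); \
	})

int main(void)
{
	struct dp_sketch data = { "eth0", "0000:04:00.0" };
	struct dp_sketch ctrl = { NULL, "0000:04:00.0" };

	nn_pr_sketch(&data, "link up\n");	/* "eth0: link up" */
	nn_pr_sketch(&ctrl, "ring reset\n");	/* "0000:04:00.0: ctrl: ring reset" */
	return 0;
}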
/* Max time to wait for NFP to respond on updates (in seconds) */
#define NFP_NET_POLL_TIMEOUT 5
@@ -84,7 +101,7 @@
#define NFP_NET_NON_Q_VECTORS 2
#define NFP_NET_IRQ_LSC_IDX 0
#define NFP_NET_IRQ_EXN_IDX 1
-#define NFP_NET_MIN_PORT_IRQS (NFP_NET_NON_Q_VECTORS + 1)
+#define NFP_NET_MIN_VNIC_IRQS (NFP_NET_NON_Q_VECTORS + 1)
/* Queue/Ring definitions */
#define NFP_NET_MAX_TX_RINGS 64 /* Max. # of Tx rings per device */
@@ -102,6 +119,7 @@
#define NFP_NET_RX_DESCS_DEFAULT 4096 /* Default # of Rx descs per ring */
#define NFP_NET_FL_BATCH 16 /* Add freelist in this Batch size */
+#define NFP_NET_XDP_MAX_COMPLETE 2048 /* XDP bufs to reclaim in NAPI poll */
/* Offload definitions */
#define NFP_NET_N_VXLAN_PORTS (NFP_NET_CFG_VXLAN_SZ / sizeof(__be16))
@@ -115,6 +133,10 @@ struct nfp_cpp;
struct nfp_eth_table_port;
struct nfp_net;
struct nfp_net_r_vector;
+struct nfp_port;
+
+/* Convenience macro for wrapping descriptor index on ring size */
+#define D_IDX(ring, idx) ((idx) & ((ring)->cnt - 1))
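D_IDX() centralizes the open-coded `& (cnt - 1)` masking used throughout the datapath. It assumes ring sizes are powers of two; together with unsigned wraparound, that also makes `D_IDX(ring, hw_ptr - sw_ptr)` a safe way to count outstanding entries, as the reworked completion paths below do. A standalone sketch of both properties:

/* Standalone sketch of the D_IDX() idiom. cnt must be a power of two;
 * the mask then acts as a cheap modulo, and unsigned subtraction of two
 * free-running pointers stays correct across 32-bit wraparound.
 */
#include <assert.h>

#define D_IDX_SKETCH(cnt, idx)	((idx) & ((cnt) - 1u))

int main(void)
{
	unsigned int cnt = 8;
	unsigned int sw_ptr = 0xfffffffe, hw_ptr = 0x00000002;

	assert(D_IDX_SKETCH(cnt, 7) == 7);
	assert(D_IDX_SKETCH(cnt, 8) == 0);	/* wraps to ring start */
	/* distance works even though hw_ptr overflowed past sw_ptr */
	assert(D_IDX_SKETCH(cnt, hw_ptr - sw_ptr) == 4);
	return 0;
}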
/* Convenience macro for writing dma address into RX/TX descriptors */
#define nfp_desc_set_dma_addr(desc, dma_addr) \
@@ -153,10 +175,15 @@ struct nfp_net_tx_desc {
__le32 dma_addr_lo; /* Low 32bit of host buf addr */
__le16 mss; /* MSS to be used for LSO */
- u8 l4_offset; /* LSO, where the L4 data starts */
+ u8 lso_hdrlen; /* LSO, TCP payload offset */
u8 flags; /* TX Flags, see @PCIE_DESC_TX_* */
-
- __le16 vlan; /* VLAN tag to add if indicated */
+ union {
+ struct {
+ u8 l3_offset; /* L3 header offset */
+ u8 l4_offset; /* L4 header offset */
+ };
+ __le16 vlan; /* VLAN tag to add if indicated */
+ };
__le16 data_len; /* Length of frame + meta data */
} __packed;
__le32 vals[4];
@@ -287,9 +314,12 @@ struct nfp_net_rx_desc {
#define NFP_NET_META_FIELD_MASK GENMASK(NFP_NET_META_FIELD_SIZE - 1, 0)
struct nfp_meta_parsed {
- u32 hash_type;
+ u8 hash_type;
+ u8 csum_type;
u32 hash;
u32 mark;
+ u32 portid;
+ __wsum csum;
};
struct nfp_net_rx_hash {
@@ -316,8 +346,6 @@ struct nfp_net_rx_buf {
* @idx: Ring index from Linux's perspective
* @fl_qcidx: Queue Controller Peripheral (QCP) queue index for the freelist
* @qcp_fl: Pointer to base of the QCP freelist queue
- * @wr_ptr_add: Accumulated number of buffers to add to QCP write pointer
- * (used for free list batching)
* @rxbufs: Array of posted FL/RX buffers
* @rxds: Virtual address of FL/RX ring in host memory
* @dma: DMA address of the FL/RX ring
@@ -331,7 +359,6 @@ struct nfp_net_rx_ring {
u32 rd_p;
u32 idx;
- u32 wr_ptr_add;
int fl_qcidx;
u8 __iomem *qcp_fl;
@@ -379,7 +406,14 @@ struct nfp_net_rx_ring {
*/
struct nfp_net_r_vector {
struct nfp_net *nfp_net;
- struct napi_struct napi;
+ union {
+ struct napi_struct napi;
+ struct {
+ struct tasklet_struct tasklet;
+ struct sk_buff_head queue;
+ struct spinlock lock;
+ };
+ };
struct nfp_net_tx_ring *tx_ring;
struct nfp_net_rx_ring *rx_ring;
@@ -508,11 +542,8 @@ struct nfp_net_dp {
* @rss_cfg: RSS configuration
* @rss_key: RSS secret key
* @rss_itbl: RSS indirection table
- * @rx_filter: Filter offload statistics - dropped packets/bytes
- * @rx_filter_prev: Filter offload statistics - values from previous update
- * @rx_filter_change: Jiffies when statistics last changed
- * @rx_filter_stats_timer: Timer for polling filter offload statistics
- * @rx_filter_lock: Lock protecting timer state changes (teardown)
+ * @xdp_flags: Flags with which XDP prog was loaded
+ * @xdp_prog: XDP prog (for ctrl path, both DRV and HW modes)
* @max_r_vecs: Number of allocated interrupt vectors for RX/TX
* @max_tx_rings: Maximum number of TX rings supported by the Firmware
* @max_rx_rings: Maximum number of RX rings supported by the Firmware
@@ -531,7 +562,6 @@ struct nfp_net_dp {
* @reconfig_sync_present: Some thread is performing synchronous reconfig
* @reconfig_timer: Timer for async reading of reconfig results
* @link_up: Is the link up?
- * @link_changed: Has link state changes since last port refresh?
* @link_status_lock: Protects @link_* and ensures atomicity with BAR reading
* @rx_coalesce_usecs: RX interrupt moderation usecs delay parameter
* @rx_coalesce_max_frames: RX interrupt moderation frame count parameter
@@ -544,10 +574,11 @@ struct nfp_net_dp {
* @rx_bar: Pointer to mapped FL/RX queues
* @debugfs_dir: Device directory in debugfs
* @ethtool_dump_flag: Ethtool dump flag
- * @port_list: Entry on device port list
+ * @vnic_list: Entry on device vNIC list
* @pdev: Backpointer to PCI device
- * @cpp: CPP device handle if available
- * @eth_port: Translated ETH Table port entry
+ * @app: APP handle if available
+ * @port: Pointer to nfp_port structure if vNIC is a port
+ * @app_priv: APP private data for this vNIC
*/
struct nfp_net {
struct nfp_net_dp dp;
@@ -562,10 +593,8 @@ struct nfp_net {
u8 rss_key[NFP_NET_CFG_RSS_KEY_SZ];
u8 rss_itbl[NFP_NET_CFG_RSS_ITBL_SZ];
- struct nfp_stat_pair rx_filter, rx_filter_prev;
- unsigned long rx_filter_change;
- struct timer_list rx_filter_stats_timer;
- spinlock_t rx_filter_lock;
+ u32 xdp_flags;
+ struct bpf_prog *xdp_prog;
unsigned int max_tx_rings;
unsigned int max_rx_rings;
@@ -589,7 +618,6 @@ struct nfp_net {
u32 me_freq_mhz;
bool link_up;
- bool link_changed;
spinlock_t link_status_lock;
spinlock_t reconfig_lock;
@@ -614,12 +642,14 @@ struct nfp_net {
struct dentry *debugfs_dir;
u32 ethtool_dump_flag;
- struct list_head port_list;
+ struct list_head vnic_list;
struct pci_dev *pdev;
- struct nfp_cpp *cpp;
+ struct nfp_app *app;
+
+ struct nfp_port *port;
- struct nfp_eth_table_port *eth_port;
+ void *app_priv;
};
/* Functions to read/write from/to a BAR
@@ -681,6 +711,7 @@ static inline void nn_pci_flush(struct nfp_net *nn)
* either add to a pointer or to read the pointer value.
*/
#define NFP_QCP_QUEUE_ADDR_SZ 0x800
+#define NFP_QCP_QUEUE_AREA_SZ 0x80000
#define NFP_QCP_QUEUE_OFF(_x) ((_x) * NFP_QCP_QUEUE_ADDR_SZ)
#define NFP_QCP_QUEUE_ADD_RPTR 0x0000
#define NFP_QCP_QUEUE_ADD_WPTR 0x0004
@@ -788,19 +819,47 @@ static inline u32 nfp_qcp_wr_ptr_read(u8 __iomem *q)
return _nfp_qcp_read(q, NFP_QCP_WRITE_PTR);
}
+static inline bool nfp_net_is_data_vnic(struct nfp_net *nn)
+{
+ WARN_ON_ONCE(!nn->dp.netdev && nn->port);
+ return !!nn->dp.netdev;
+}
+
+static inline bool nfp_net_running(struct nfp_net *nn)
+{
+ return nn->dp.ctrl & NFP_NET_CFG_CTRL_ENABLE;
+}
+
+static inline const char *nfp_net_name(struct nfp_net *nn)
+{
+ return nn->dp.netdev ? nn->dp.netdev->name : "ctrl";
+}
+
/* Globals */
extern const char nfp_driver_version[];
+extern const struct net_device_ops nfp_net_netdev_ops;
+
+static inline bool nfp_netdev_is_nfp_net(struct net_device *netdev)
+{
+ return netdev->netdev_ops == &nfp_net_netdev_ops;
+}
+
/* Prototypes */
void nfp_net_get_fw_version(struct nfp_net_fw_version *fw_ver,
void __iomem *ctrl_bar);
struct nfp_net *
-nfp_net_netdev_alloc(struct pci_dev *pdev,
- unsigned int max_tx_rings, unsigned int max_rx_rings);
-void nfp_net_netdev_free(struct nfp_net *nn);
-int nfp_net_netdev_init(struct net_device *netdev);
-void nfp_net_netdev_clean(struct net_device *netdev);
+nfp_net_alloc(struct pci_dev *pdev, bool needs_netdev,
+ unsigned int max_tx_rings, unsigned int max_rx_rings);
+void nfp_net_free(struct nfp_net *nn);
+
+int nfp_net_init(struct nfp_net *nn);
+void nfp_net_clean(struct nfp_net *nn);
+
+int nfp_ctrl_open(struct nfp_net *nn);
+void nfp_ctrl_close(struct nfp_net *nn);
+
void nfp_net_set_ethtool_ops(struct net_device *netdev);
void nfp_net_info(struct nfp_net *nn);
int nfp_net_reconfig(struct nfp_net *nn, u32 update);
@@ -821,15 +880,11 @@ struct nfp_net_dp *nfp_net_clone_dp(struct nfp_net *nn);
int nfp_net_ring_reconfig(struct nfp_net *nn, struct nfp_net_dp *new,
struct netlink_ext_ack *extack);
-bool nfp_net_link_changed_read_clear(struct nfp_net *nn);
-int nfp_net_refresh_eth_port(struct nfp_net *nn);
-void nfp_net_refresh_port_table(struct nfp_net *nn);
-
#ifdef CONFIG_NFP_DEBUG
void nfp_net_debugfs_create(void);
void nfp_net_debugfs_destroy(void);
struct dentry *nfp_net_debugfs_device_add(struct pci_dev *pdev);
-void nfp_net_debugfs_port_add(struct nfp_net *nn, struct dentry *ddir, int id);
+void nfp_net_debugfs_vnic_add(struct nfp_net *nn, struct dentry *ddir, int id);
void nfp_net_debugfs_dir_clean(struct dentry **dir);
#else
static inline void nfp_net_debugfs_create(void)
@@ -846,7 +901,7 @@ static inline struct dentry *nfp_net_debugfs_device_add(struct pci_dev *pdev)
}
static inline void
-nfp_net_debugfs_port_add(struct nfp_net *nn, struct dentry *ddir, int id)
+nfp_net_debugfs_vnic_add(struct nfp_net *nn, struct dentry *ddir, int id)
{
}
@@ -855,7 +910,4 @@ static inline void nfp_net_debugfs_dir_clean(struct dentry **dir)
}
#endif /* CONFIG_NFP_DEBUG */
-void nfp_net_filter_stats_timer(unsigned long data);
-int nfp_net_bpf_offload(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf);
-
#endif /* _NFP_NET_H_ */
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 82bd6b0935f1..18750ff0ede6 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -61,15 +61,17 @@
#include <linux/log2.h>
#include <linux/if_vlan.h>
#include <linux/random.h>
-
+#include <linux/vmalloc.h>
#include <linux/ktime.h>
-#include <net/pkt_cls.h>
+#include <net/switchdev.h>
#include <net/vxlan.h>
#include "nfpcore/nfp_nsp.h"
+#include "nfp_app.h"
#include "nfp_net_ctrl.h"
#include "nfp_net.h"
+#include "nfp_port.h"
/**
* nfp_net_get_fw_version() - Read and parse the FW version
@@ -279,6 +281,30 @@ int nfp_net_reconfig(struct nfp_net *nn, u32 update)
return ret;
}
+/**
+ * nfp_net_reconfig_mbox() - Reconfigure the firmware via the mailbox
+ * @nn: NFP Net device to reconfigure
+ * @mbox_cmd: The value for the mailbox command
+ *
+ * Helper function for mailbox updates
+ *
+ * Return: Negative errno on error, 0 on success
+ */
+static int nfp_net_reconfig_mbox(struct nfp_net *nn, u32 mbox_cmd)
+{
+ int ret;
+
+ nn_writeq(nn, NFP_NET_CFG_MBOX_CMD, mbox_cmd);
+
+ ret = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_MBOX);
+ if (ret) {
+ nn_err(nn, "Mailbox update error\n");
+ return ret;
+ }
+
+ return -nn_readl(nn, NFP_NET_CFG_MBOX_RET);
+}
+
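The handshake behind nfp_net_reconfig_mbox() is: write the command word, kick an UPDATE_MBOX reconfig, then read back the firmware's return code, negated so that firmware error numbers surface as ordinary negative errnos. A toy model of that flow, with an array standing in for the config BAR and a stub for the firmware (the offsets and the EBUSY behaviour are illustrative assumptions, not the real NFP_NET_CFG_* values):

/* Toy model of the mailbox handshake. */
#include <stdint.h>
#include <stdio.h>

enum { MBOX_CMD = 0, MBOX_RET = 1 };

static uint32_t bar[2];

static void fw_handle_mbox(void)
{
	/* pretend firmware rejects command 2 with EBUSY (16) */
	bar[MBOX_RET] = bar[MBOX_CMD] == 2 ? 16 : 0;
}

static int reconfig_mbox(uint32_t cmd)
{
	bar[MBOX_CMD] = cmd;
	fw_handle_mbox();		/* models the UPDATE_MBOX reconfig */
	return -(int)bar[MBOX_RET];	/* 0 on success, -errno on failure */
}

int main(void)
{
	printf("cmd 1 -> %d\n", reconfig_mbox(1));	/* 0 */
	printf("cmd 2 -> %d\n", reconfig_mbox(2));	/* -16 */
	return 0;
}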
/* Interrupt configuration and handling
*/
@@ -391,17 +417,13 @@ static irqreturn_t nfp_net_irq_rxtx(int irq, void *data)
return IRQ_HANDLED;
}
-bool nfp_net_link_changed_read_clear(struct nfp_net *nn)
+static irqreturn_t nfp_ctrl_irq_rxtx(int irq, void *data)
{
- unsigned long flags;
- bool ret;
+ struct nfp_net_r_vector *r_vec = data;
- spin_lock_irqsave(&nn->link_status_lock, flags);
- ret = nn->link_changed;
- nn->link_changed = false;
- spin_unlock_irqrestore(&nn->link_status_lock, flags);
+ tasklet_schedule(&r_vec->tasklet);
- return ret;
+ return IRQ_HANDLED;
}
/**
@@ -423,7 +445,8 @@ static void nfp_net_read_link_status(struct nfp_net *nn)
goto out;
nn->link_up = link_up;
- nn->link_changed = true;
+ if (nn->port)
+ set_bit(NFP_PORT_CHANGED, &nn->port->flags);
if (nn->link_up) {
netif_carrier_on(nn->dp.netdev);
@@ -515,34 +538,6 @@ nfp_net_rx_ring_init(struct nfp_net_rx_ring *rx_ring,
}
/**
- * nfp_net_vecs_init() - Assign IRQs and setup rvecs.
- * @netdev: netdev structure
- */
-static void nfp_net_vecs_init(struct net_device *netdev)
-{
- struct nfp_net *nn = netdev_priv(netdev);
- struct nfp_net_r_vector *r_vec;
- int r;
-
- nn->lsc_handler = nfp_net_irq_lsc;
- nn->exn_handler = nfp_net_irq_exn;
-
- for (r = 0; r < nn->max_r_vecs; r++) {
- struct msix_entry *entry;
-
- entry = &nn->irq_entries[NFP_NET_NON_Q_VECTORS + r];
-
- r_vec = &nn->r_vecs[r];
- r_vec->nfp_net = nn;
- r_vec->handler = nfp_net_irq_rxtx;
- r_vec->irq_entry = entry->entry;
- r_vec->irq_vector = entry->vector;
-
- cpumask_set_cpu(r, &r_vec->affinity_mask);
- }
-}
-
-/**
* nfp_net_aux_irq_request() - Request an auxiliary interrupt (LSC or EXN)
* @nn: NFP Network structure
* @ctrl_offset: Control BAR offset where IRQ configuration should be written
@@ -562,7 +557,7 @@ nfp_net_aux_irq_request(struct nfp_net *nn, u32 ctrl_offset,
entry = &nn->irq_entries[vector_idx];
- snprintf(name, name_sz, format, netdev_name(nn->dp.netdev));
+ snprintf(name, name_sz, format, nfp_net_name(nn));
err = request_irq(entry->vector, handler, 0, name, nn);
if (err) {
nn_err(nn, "Failed to request IRQ %d (err=%d).\n",
@@ -667,17 +662,22 @@ static void nfp_net_tx_tso(struct nfp_net_r_vector *r_vec,
if (!skb_is_gso(skb))
return;
- if (!skb->encapsulation)
+ if (!skb->encapsulation) {
+ txd->l3_offset = skb_network_offset(skb);
+ txd->l4_offset = skb_transport_offset(skb);
hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
- else
+ } else {
+ txd->l3_offset = skb_inner_network_offset(skb);
+ txd->l4_offset = skb_inner_transport_offset(skb);
hdrlen = skb_inner_transport_header(skb) - skb->data +
inner_tcp_hdrlen(skb);
+ }
txbuf->pkt_cnt = skb_shinfo(skb)->gso_segs;
txbuf->real_len += hdrlen * (txbuf->pkt_cnt - 1);
mss = skb_shinfo(skb)->gso_size & PCIE_DESC_TX_MSS_MASK;
- txd->l4_offset = hdrlen;
+ txd->lso_hdrlen = hdrlen;
txd->mss = cpu_to_le16(mss);
txd->flags |= PCIE_DESC_TX_LSO;
@@ -756,6 +756,26 @@ static void nfp_net_tx_xmit_more_flush(struct nfp_net_tx_ring *tx_ring)
tx_ring->wr_ptr_add = 0;
}
+static int nfp_net_prep_port_id(struct sk_buff *skb)
+{
+ struct metadata_dst *md_dst = skb_metadata_dst(skb);
+ unsigned char *data;
+
+ if (likely(!md_dst))
+ return 0;
+ if (unlikely(md_dst->type != METADATA_HW_PORT_MUX))
+ return 0;
+
+ if (unlikely(skb_cow_head(skb, 8)))
+ return -ENOMEM;
+
+ data = skb_push(skb, 8);
+ put_unaligned_be32(NFP_NET_META_PORTID, data);
+ put_unaligned_be32(md_dst->u.port_info.port_id, data + 4);
+
+ return 8;
+}
+
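nfp_net_prep_port_id() pushes an 8-byte prepend in front of the frame, a big-endian field-type word followed by the mux port id, and the returned md_bytes is folded into offset_eop so the device knows how much metadata to strip. A layout sketch (the numeric PORTID field-type value used below is an assumption for illustration):

/* Buffer-layout sketch for the 8-byte TX metadata prepend: a
 * big-endian field-type word followed by the mux port id.
 */
#include <stdint.h>
#include <stdio.h>

static void put_be32(uint8_t *p, uint32_t v)
{
	p[0] = v >> 24;
	p[1] = v >> 16;
	p[2] = v >> 8;
	p[3] = v;
}

int main(void)
{
	uint8_t frame[64 + 8], *data = frame + 8;	/* headroom + packet */
	uint32_t port_id = 0x20000001;			/* example mux id */

	data -= 8;					/* skb_push(skb, 8) */
	put_be32(data, 5 /* NFP_NET_META_PORTID, value assumed */);
	put_be32(data + 4, port_id);

	printf("meta: %02x%02x%02x%02x %02x%02x%02x%02x\n",
	       data[0], data[1], data[2], data[3],
	       data[4], data[5], data[6], data[7]);
	return 0;
}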
/**
* nfp_net_tx() - Main transmit entry point
* @skb: SKB to transmit
@@ -768,6 +788,7 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
struct nfp_net *nn = netdev_priv(netdev);
const struct skb_frag_struct *frag;
struct nfp_net_tx_desc *txd, txdg;
+ int f, nr_frags, wr_idx, md_bytes;
struct nfp_net_tx_ring *tx_ring;
struct nfp_net_r_vector *r_vec;
struct nfp_net_tx_buf *txbuf;
@@ -775,8 +796,6 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
struct nfp_net_dp *dp;
dma_addr_t dma_addr;
unsigned int fsize;
- int f, nr_frags;
- int wr_idx;
u16 qidx;
dp = &nn->dp;
@@ -798,13 +817,20 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_BUSY;
}
+ md_bytes = nfp_net_prep_port_id(skb);
+ if (unlikely(md_bytes < 0)) {
+ nfp_net_tx_xmit_more_flush(tx_ring);
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
/* Start with the head skbuf */
dma_addr = dma_map_single(dp->dev, skb->data, skb_headlen(skb),
DMA_TO_DEVICE);
if (dma_mapping_error(dp->dev, dma_addr))
goto err_free;
- wr_idx = tx_ring->wr_p & (tx_ring->cnt - 1);
+ wr_idx = D_IDX(tx_ring, tx_ring->wr_p);
/* Stash the soft descriptor of the head then initialize it */
txbuf = &tx_ring->txbufs[wr_idx];
@@ -816,19 +842,18 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
/* Build TX descriptor */
txd = &tx_ring->txds[wr_idx];
- txd->offset_eop = (nr_frags == 0) ? PCIE_DESC_TX_EOP : 0;
+ txd->offset_eop = (nr_frags ? 0 : PCIE_DESC_TX_EOP) | md_bytes;
txd->dma_len = cpu_to_le16(skb_headlen(skb));
nfp_desc_set_dma_addr(txd, dma_addr);
txd->data_len = cpu_to_le16(skb->len);
txd->flags = 0;
txd->mss = 0;
- txd->l4_offset = 0;
+ txd->lso_hdrlen = 0;
+ /* Do not reorder - tso may adjust pkt cnt, vlan may override fields */
nfp_net_tx_tso(r_vec, txbuf, txd, skb);
-
nfp_net_tx_csum(dp, r_vec, txbuf, txd, skb);
-
if (skb_vlan_tag_present(skb) && dp->ctrl & NFP_NET_CFG_CTRL_TXVLAN) {
txd->flags |= PCIE_DESC_TX_VLAN;
txd->vlan = cpu_to_le16(skb_vlan_tag_get(skb));
@@ -848,7 +873,7 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
if (dma_mapping_error(dp->dev, dma_addr))
goto err_unmap;
- wr_idx = (wr_idx + 1) & (tx_ring->cnt - 1);
+ wr_idx = D_IDX(tx_ring, wr_idx + 1);
tx_ring->txbufs[wr_idx].skb = skb;
tx_ring->txbufs[wr_idx].dma_addr = dma_addr;
tx_ring->txbufs[wr_idx].fidx = f;
@@ -857,7 +882,7 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
*txd = txdg;
txd->dma_len = cpu_to_le16(fsize);
nfp_desc_set_dma_addr(txd, dma_addr);
- txd->offset_eop =
+ txd->offset_eop |=
(f == nr_frags - 1) ? PCIE_DESC_TX_EOP : 0;
}
@@ -936,14 +961,10 @@ static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring)
if (qcp_rd_p == tx_ring->qcp_rd_p)
return;
- if (qcp_rd_p > tx_ring->qcp_rd_p)
- todo = qcp_rd_p - tx_ring->qcp_rd_p;
- else
- todo = qcp_rd_p + tx_ring->cnt - tx_ring->qcp_rd_p;
+ todo = D_IDX(tx_ring, qcp_rd_p - tx_ring->qcp_rd_p);
while (todo--) {
- idx = tx_ring->rd_p & (tx_ring->cnt - 1);
- tx_ring->rd_p++;
+ idx = D_IDX(tx_ring, tx_ring->rd_p++);
skb = tx_ring->txbufs[idx].skb;
if (!skb)
@@ -982,6 +1003,9 @@ static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring)
r_vec->tx_pkts += done_pkts;
u64_stats_update_end(&r_vec->tx_sync);
+ if (!dp->netdev)
+ return;
+
nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx);
netdev_tx_completed_queue(nd_q, done_pkts, done_bytes);
if (nfp_net_tx_ring_should_wake(tx_ring)) {
@@ -997,45 +1021,45 @@ static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring)
tx_ring->rd_p, tx_ring->wr_p, tx_ring->cnt);
}
-static void nfp_net_xdp_complete(struct nfp_net_tx_ring *tx_ring)
+static bool nfp_net_xdp_complete(struct nfp_net_tx_ring *tx_ring)
{
struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
u32 done_pkts = 0, done_bytes = 0;
+ bool done_all;
int idx, todo;
u32 qcp_rd_p;
- if (tx_ring->wr_p == tx_ring->rd_p)
- return;
-
/* Work out how many descriptors have been transmitted */
qcp_rd_p = nfp_qcp_rd_ptr_read(tx_ring->qcp_q);
if (qcp_rd_p == tx_ring->qcp_rd_p)
- return;
+ return true;
- if (qcp_rd_p > tx_ring->qcp_rd_p)
- todo = qcp_rd_p - tx_ring->qcp_rd_p;
- else
- todo = qcp_rd_p + tx_ring->cnt - tx_ring->qcp_rd_p;
+ todo = D_IDX(tx_ring, qcp_rd_p - tx_ring->qcp_rd_p);
+
+ done_all = todo <= NFP_NET_XDP_MAX_COMPLETE;
+ todo = min(todo, NFP_NET_XDP_MAX_COMPLETE);
+
+ tx_ring->qcp_rd_p = D_IDX(tx_ring, tx_ring->qcp_rd_p + todo);
done_pkts = todo;
while (todo--) {
- idx = tx_ring->rd_p & (tx_ring->cnt - 1);
+ idx = D_IDX(tx_ring, tx_ring->rd_p);
tx_ring->rd_p++;
done_bytes += tx_ring->txbufs[idx].real_len;
}
- tx_ring->qcp_rd_p = qcp_rd_p;
-
u64_stats_update_begin(&r_vec->tx_sync);
r_vec->tx_bytes += done_bytes;
r_vec->tx_pkts += done_pkts;
u64_stats_update_end(&r_vec->tx_sync);
WARN_ONCE(tx_ring->wr_p - tx_ring->rd_p > tx_ring->cnt,
- "TX ring corruption rd_p=%u wr_p=%u cnt=%u\n",
+ "XDP TX ring corruption rd_p=%u wr_p=%u cnt=%u\n",
tx_ring->rd_p, tx_ring->wr_p, tx_ring->cnt);
+
+ return done_all;
}
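nfp_net_xdp_complete() is now bounded: it reclaims at most NFP_NET_XDP_MAX_COMPLETE buffers per invocation and reports whether it caught up with the hardware, letting nfp_net_rx() return the full budget to keep NAPI scheduled rather than looping indefinitely in one poll. The control flow, reduced to pointer arithmetic:

/* Reduced model of the bounded completion loop: consume at most
 * MAX_COMPLETE entries per call and report whether the hardware
 * read pointer was fully caught up to.
 */
#include <stdbool.h>
#include <stdio.h>

#define MAX_COMPLETE 2048

static bool complete_batch(unsigned int *qcp_rd_p, unsigned int hw_rd_p)
{
	unsigned int todo = hw_rd_p - *qcp_rd_p;	/* wrap-safe */
	bool done_all = todo <= MAX_COMPLETE;

	if (!done_all)
		todo = MAX_COMPLETE;
	*qcp_rd_p += todo;
	return done_all;
}

int main(void)
{
	unsigned int rd_p = 0;

	/* 5000 outstanding: two partial passes, then a draining one */
	printf("%d\n", complete_batch(&rd_p, 5000));	/* 0: 2048 done */
	printf("%d\n", complete_batch(&rd_p, 5000));	/* 0: 2048 more */
	printf("%d\n", complete_batch(&rd_p, 5000));	/* 1: drained */
	return 0;
}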
/**
@@ -1056,7 +1080,7 @@ nfp_net_tx_ring_reset(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
struct sk_buff *skb;
int idx, nr_frags;
- idx = tx_ring->rd_p & (tx_ring->cnt - 1);
+ idx = D_IDX(tx_ring, tx_ring->rd_p);
tx_buf = &tx_ring->txbufs[idx];
skb = tx_ring->txbufs[idx].skb;
@@ -1091,7 +1115,7 @@ nfp_net_tx_ring_reset(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
tx_ring->qcp_rd_p = 0;
tx_ring->wr_ptr_add = 0;
- if (tx_ring->is_xdp)
+ if (tx_ring->is_xdp || !dp->netdev)
return;
nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx);
@@ -1209,7 +1233,7 @@ static void nfp_net_rx_give_one(const struct nfp_net_dp *dp,
{
unsigned int wr_idx;
- wr_idx = rx_ring->wr_p & (rx_ring->cnt - 1);
+ wr_idx = D_IDX(rx_ring, rx_ring->wr_p);
nfp_net_dma_sync_dev_rx(dp, dma_addr);
@@ -1224,14 +1248,12 @@ static void nfp_net_rx_give_one(const struct nfp_net_dp *dp,
dma_addr + dp->rx_dma_off);
rx_ring->wr_p++;
- rx_ring->wr_ptr_add++;
- if (rx_ring->wr_ptr_add >= NFP_NET_FL_BATCH) {
+ if (!(rx_ring->wr_p % NFP_NET_FL_BATCH)) {
/* Update write pointer of the freelist queue. Make
* sure all writes are flushed before telling the hardware.
*/
wmb();
- nfp_qcp_wr_ptr_add(rx_ring->qcp_fl, rx_ring->wr_ptr_add);
- rx_ring->wr_ptr_add = 0;
+ nfp_qcp_wr_ptr_add(rx_ring->qcp_fl, NFP_NET_FL_BATCH);
}
}
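With the wr_ptr_add accumulator removed, batching is derived from the write pointer itself: the QCP doorbell is written only when wr_p reaches a multiple of NFP_NET_FL_BATCH, so a single wmb()-ordered MMIO write now covers a whole batch of freelist descriptors. Sketched:

/* Sketch of doorbell batching keyed off the free-running write
 * pointer: ring once per FL_BATCH descriptors instead of per buffer.
 */
#include <stdio.h>

#define FL_BATCH 16

static unsigned int doorbells;

static void give_one(unsigned int *wr_p)
{
	(*wr_p)++;
	if (!(*wr_p % FL_BATCH)) {
		/* kernel code issues wmb(), then one MMIO pointer add */
		doorbells++;
	}
}

int main(void)
{
	unsigned int wr_p = 0;

	for (int i = 0; i < 64; i++)
		give_one(&wr_p);
	printf("64 buffers -> %u doorbell writes\n", doorbells);	/* 4 */
	return 0;
}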
@@ -1247,7 +1269,7 @@ static void nfp_net_rx_ring_reset(struct nfp_net_rx_ring *rx_ring)
unsigned int wr_idx, last_idx;
/* Move the empty entry to the end of the list */
- wr_idx = rx_ring->wr_p & (rx_ring->cnt - 1);
+ wr_idx = D_IDX(rx_ring, rx_ring->wr_p);
last_idx = rx_ring->cnt - 1;
rx_ring->rxbufs[wr_idx].dma_addr = rx_ring->rxbufs[last_idx].dma_addr;
rx_ring->rxbufs[wr_idx].frag = rx_ring->rxbufs[last_idx].frag;
@@ -1257,7 +1279,6 @@ static void nfp_net_rx_ring_reset(struct nfp_net_rx_ring *rx_ring)
memset(rx_ring->rxds, 0, sizeof(*rx_ring->rxds) * rx_ring->cnt);
rx_ring->wr_p = 0;
rx_ring->rd_p = 0;
- rx_ring->wr_ptr_add = 0;
}
/**
@@ -1350,17 +1371,28 @@ static int nfp_net_rx_csum_has_errors(u16 flags)
* @dp: NFP Net data path struct
* @r_vec: per-ring structure
* @rxd: Pointer to RX descriptor
+ * @meta: Parsed metadata prepend
* @skb: Pointer to SKB
*/
static void nfp_net_rx_csum(struct nfp_net_dp *dp,
struct nfp_net_r_vector *r_vec,
- struct nfp_net_rx_desc *rxd, struct sk_buff *skb)
+ struct nfp_net_rx_desc *rxd,
+ struct nfp_meta_parsed *meta, struct sk_buff *skb)
{
skb_checksum_none_assert(skb);
if (!(dp->netdev->features & NETIF_F_RXCSUM))
return;
+ if (meta->csum_type) {
+ skb->ip_summed = meta->csum_type;
+ skb->csum = meta->csum;
+ u64_stats_update_begin(&r_vec->rx_sync);
+ r_vec->hw_csum_rx_ok++;
+ u64_stats_update_end(&r_vec->rx_sync);
+ return;
+ }
+
if (nfp_net_rx_csum_has_errors(le16_to_cpu(rxd->rxd.flags))) {
u64_stats_update_begin(&r_vec->rx_sync);
r_vec->hw_csum_rx_error++;
@@ -1445,6 +1477,16 @@ nfp_net_parse_meta(struct net_device *netdev, struct nfp_meta_parsed *meta,
meta->mark = get_unaligned_be32(data);
data += 4;
break;
+ case NFP_NET_META_PORTID:
+ meta->portid = get_unaligned_be32(data);
+ data += 4;
+ break;
+ case NFP_NET_META_CSUM:
+ meta->csum_type = CHECKSUM_COMPLETE;
+ meta->csum =
+ (__force __wsum)__get_unaligned_cpu32(data);
+ data += 4;
+ break;
default:
return NULL;
}
@@ -1479,18 +1521,26 @@ static bool
nfp_net_tx_xdp_buf(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring,
struct nfp_net_tx_ring *tx_ring,
struct nfp_net_rx_buf *rxbuf, unsigned int dma_off,
- unsigned int pkt_len)
+ unsigned int pkt_len, bool *completed)
{
struct nfp_net_tx_buf *txbuf;
struct nfp_net_tx_desc *txd;
int wr_idx;
if (unlikely(nfp_net_tx_full(tx_ring, 1))) {
- nfp_net_rx_drop(dp, rx_ring->r_vec, rx_ring, rxbuf, NULL);
- return false;
+ if (!*completed) {
+ nfp_net_xdp_complete(tx_ring);
+ *completed = true;
+ }
+
+ if (unlikely(nfp_net_tx_full(tx_ring, 1))) {
+ nfp_net_rx_drop(dp, rx_ring->r_vec, rx_ring, rxbuf,
+ NULL);
+ return false;
+ }
}
- wr_idx = tx_ring->wr_p & (tx_ring->cnt - 1);
+ wr_idx = D_IDX(tx_ring, tx_ring->wr_p);
/* Stash the soft descriptor of the head then initialize it */
txbuf = &tx_ring->txbufs[wr_idx];
@@ -1515,7 +1565,7 @@ nfp_net_tx_xdp_buf(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring,
txd->flags = 0;
txd->mss = 0;
- txd->l4_offset = 0;
+ txd->lso_hdrlen = 0;
tx_ring->wr_p++;
tx_ring->wr_ptr_add++;
@@ -1559,6 +1609,7 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
struct nfp_net_tx_ring *tx_ring;
struct bpf_prog *xdp_prog;
+ bool xdp_tx_cmpl = false;
unsigned int true_bufsz;
struct sk_buff *skb;
int pkts_polled = 0;
@@ -1574,10 +1625,11 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
struct nfp_net_rx_buf *rxbuf;
struct nfp_net_rx_desc *rxd;
struct nfp_meta_parsed meta;
+ struct net_device *netdev;
dma_addr_t new_dma_addr;
void *new_frag;
- idx = rx_ring->rd_p & (rx_ring->cnt - 1);
+ idx = D_IDX(rx_ring, rx_ring->rd_p);
rxd = &rx_ring->rxds[idx];
if (!(rxd->rxd.meta_len_dd & PCIE_DESC_RX_DD))
@@ -1652,7 +1704,7 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
}
if (xdp_prog && !(rxd->rxd.flags & PCIE_DESC_RX_BPF &&
- dp->bpf_offload_xdp)) {
+ dp->bpf_offload_xdp) && !meta.portid) {
unsigned int dma_off;
void *hard_start;
int act;
@@ -1669,14 +1721,17 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
if (unlikely(!nfp_net_tx_xdp_buf(dp, rx_ring,
tx_ring, rxbuf,
dma_off,
- pkt_len)))
+ pkt_len,
+ &xdp_tx_cmpl)))
trace_xdp_exception(dp->netdev,
xdp_prog, act);
continue;
default:
bpf_warn_invalid_xdp_action(act);
+ /* fall through */
case XDP_ABORTED:
trace_xdp_exception(dp->netdev, xdp_prog, act);
+ /* fall through */
case XDP_DROP:
nfp_net_rx_give_one(dp, rx_ring, rxbuf->frag,
rxbuf->dma_addr);
@@ -1695,6 +1750,20 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
continue;
}
+ if (likely(!meta.portid)) {
+ netdev = dp->netdev;
+ } else {
+ struct nfp_net *nn;
+
+ nn = netdev_priv(dp->netdev);
+ netdev = nfp_app_repr_get(nn->app, meta.portid);
+ if (unlikely(!netdev)) {
+ nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, skb);
+ continue;
+ }
+ nfp_repr_inc_rx_stats(netdev, pkt_len);
+ }
+
nfp_net_dma_unmap_rx(dp, rxbuf->dma_addr);
nfp_net_rx_give_one(dp, rx_ring, new_frag, new_dma_addr);
@@ -1706,9 +1775,9 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
skb_set_hash(skb, meta.hash, meta.hash_type);
skb_record_rx_queue(skb, rx_ring->idx);
- skb->protocol = eth_type_trans(skb, dp->netdev);
+ skb->protocol = eth_type_trans(skb, netdev);
- nfp_net_rx_csum(dp, r_vec, rxd, skb);
+ nfp_net_rx_csum(dp, r_vec, rxd, &meta, skb);
if (rxd->rxd.flags & PCIE_DESC_RX_VLAN)
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
@@ -1717,8 +1786,14 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
napi_gro_receive(&rx_ring->r_vec->napi, skb);
}
- if (xdp_prog && tx_ring->wr_ptr_add)
- nfp_net_tx_xmit_more_flush(tx_ring);
+ if (xdp_prog) {
+ if (tx_ring->wr_ptr_add)
+ nfp_net_tx_xmit_more_flush(tx_ring);
+ else if (unlikely(tx_ring->wr_p != tx_ring->rd_p) &&
+ !xdp_tx_cmpl)
+ if (!nfp_net_xdp_complete(tx_ring))
+ pkts_polled = budget;
+ }
rcu_read_unlock();
return pkts_polled;
@@ -1739,11 +1814,8 @@ static int nfp_net_poll(struct napi_struct *napi, int budget)
if (r_vec->tx_ring)
nfp_net_tx_complete(r_vec->tx_ring);
- if (r_vec->rx_ring) {
+ if (r_vec->rx_ring)
pkts_polled = nfp_net_rx(r_vec->rx_ring, budget);
- if (r_vec->xdp_ring)
- nfp_net_xdp_complete(r_vec->xdp_ring);
- }
if (pkts_polled < budget)
if (napi_complete_done(napi, pkts_polled))
@@ -1752,10 +1824,273 @@ static int nfp_net_poll(struct napi_struct *napi, int budget)
return pkts_polled;
}
+/* Control device data path
+ */
+
+static bool
+nfp_ctrl_tx_one(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
+ struct sk_buff *skb, bool old)
+{
+ unsigned int real_len = skb->len, meta_len = 0;
+ struct nfp_net_tx_ring *tx_ring;
+ struct nfp_net_tx_buf *txbuf;
+ struct nfp_net_tx_desc *txd;
+ struct nfp_net_dp *dp;
+ dma_addr_t dma_addr;
+ int wr_idx;
+
+ dp = &r_vec->nfp_net->dp;
+ tx_ring = r_vec->tx_ring;
+
+ if (WARN_ON_ONCE(skb_shinfo(skb)->nr_frags)) {
+ nn_dp_warn(dp, "Driver's CTRL TX does not implement gather\n");
+ goto err_free;
+ }
+
+ if (unlikely(nfp_net_tx_full(tx_ring, 1))) {
+ u64_stats_update_begin(&r_vec->tx_sync);
+ r_vec->tx_busy++;
+ u64_stats_update_end(&r_vec->tx_sync);
+ if (!old)
+ __skb_queue_tail(&r_vec->queue, skb);
+ else
+ __skb_queue_head(&r_vec->queue, skb);
+ return true;
+ }
+
+ if (nfp_app_ctrl_has_meta(nn->app)) {
+ if (unlikely(skb_headroom(skb) < 8)) {
+ nn_dp_warn(dp, "CTRL TX on skb without headroom\n");
+ goto err_free;
+ }
+ meta_len = 8;
+ put_unaligned_be32(NFP_META_PORT_ID_CTRL, skb_push(skb, 4));
+ put_unaligned_be32(NFP_NET_META_PORTID, skb_push(skb, 4));
+ }
+
+ /* Start with the head skbuf */
+ dma_addr = dma_map_single(dp->dev, skb->data, skb_headlen(skb),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dp->dev, dma_addr))
+ goto err_dma_warn;
+
+ wr_idx = D_IDX(tx_ring, tx_ring->wr_p);
+
+ /* Stash the soft descriptor of the head then initialize it */
+ txbuf = &tx_ring->txbufs[wr_idx];
+ txbuf->skb = skb;
+ txbuf->dma_addr = dma_addr;
+ txbuf->fidx = -1;
+ txbuf->pkt_cnt = 1;
+ txbuf->real_len = real_len;
+
+ /* Build TX descriptor */
+ txd = &tx_ring->txds[wr_idx];
+ txd->offset_eop = meta_len | PCIE_DESC_TX_EOP;
+ txd->dma_len = cpu_to_le16(skb_headlen(skb));
+ nfp_desc_set_dma_addr(txd, dma_addr);
+ txd->data_len = cpu_to_le16(skb->len);
+
+ txd->flags = 0;
+ txd->mss = 0;
+ txd->lso_hdrlen = 0;
+
+ tx_ring->wr_p++;
+ tx_ring->wr_ptr_add++;
+ nfp_net_tx_xmit_more_flush(tx_ring);
+
+ return false;
+
+err_dma_warn:
+ nn_dp_warn(dp, "Failed to DMA map TX CTRL buffer\n");
+err_free:
+ u64_stats_update_begin(&r_vec->tx_sync);
+ r_vec->tx_errors++;
+ u64_stats_update_end(&r_vec->tx_sync);
+ dev_kfree_skb_any(skb);
+ return false;
+}
+
+bool nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb)
+{
+ struct nfp_net_r_vector *r_vec = &nn->r_vecs[0];
+ bool ret;
+
+ spin_lock_bh(&r_vec->lock);
+ ret = nfp_ctrl_tx_one(nn, r_vec, skb, false);
+ spin_unlock_bh(&r_vec->lock);
+
+ return ret;
+}
+
+static void __nfp_ctrl_tx_queued(struct nfp_net_r_vector *r_vec)
+{
+ struct sk_buff *skb;
+
+ while ((skb = __skb_dequeue(&r_vec->queue)))
+ if (nfp_ctrl_tx_one(r_vec->nfp_net, r_vec, skb, true))
+ return;
+}
+
+static bool
+nfp_ctrl_meta_ok(struct nfp_net *nn, void *data, unsigned int meta_len)
+{
+ u32 meta_type, meta_tag;
+
+ if (!nfp_app_ctrl_has_meta(nn->app))
+ return !meta_len;
+
+ if (meta_len != 8)
+ return false;
+
+ meta_type = get_unaligned_be32(data);
+ meta_tag = get_unaligned_be32(data + 4);
+
+ return (meta_type == NFP_NET_META_PORTID &&
+ meta_tag == NFP_META_PORT_ID_CTRL);
+}
+
+static bool
+nfp_ctrl_rx_one(struct nfp_net *nn, struct nfp_net_dp *dp,
+ struct nfp_net_r_vector *r_vec, struct nfp_net_rx_ring *rx_ring)
+{
+ unsigned int meta_len, data_len, meta_off, pkt_len, pkt_off;
+ struct nfp_net_rx_buf *rxbuf;
+ struct nfp_net_rx_desc *rxd;
+ dma_addr_t new_dma_addr;
+ struct sk_buff *skb;
+ void *new_frag;
+ int idx;
+
+ idx = D_IDX(rx_ring, rx_ring->rd_p);
+
+ rxd = &rx_ring->rxds[idx];
+ if (!(rxd->rxd.meta_len_dd & PCIE_DESC_RX_DD))
+ return false;
+
+ /* Memory barrier to ensure that we won't do other reads
+ * before the DD bit.
+ */
+ dma_rmb();
+
+ rx_ring->rd_p++;
+
+ rxbuf = &rx_ring->rxbufs[idx];
+ meta_len = rxd->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK;
+ data_len = le16_to_cpu(rxd->rxd.data_len);
+ pkt_len = data_len - meta_len;
+
+ pkt_off = NFP_NET_RX_BUF_HEADROOM + dp->rx_dma_off;
+ if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
+ pkt_off += meta_len;
+ else
+ pkt_off += dp->rx_offset;
+ meta_off = pkt_off - meta_len;
+
+ /* Stats update */
+ u64_stats_update_begin(&r_vec->rx_sync);
+ r_vec->rx_pkts++;
+ r_vec->rx_bytes += pkt_len;
+ u64_stats_update_end(&r_vec->rx_sync);
+
+ nfp_net_dma_sync_cpu_rx(dp, rxbuf->dma_addr + meta_off, data_len);
+
+ if (unlikely(!nfp_ctrl_meta_ok(nn, rxbuf->frag + meta_off, meta_len))) {
+ nn_dp_warn(dp, "incorrect metadata for ctrl packet (%d)\n",
+ meta_len);
+ nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
+ return true;
+ }
+
+ skb = build_skb(rxbuf->frag, dp->fl_bufsz);
+ if (unlikely(!skb)) {
+ nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
+ return true;
+ }
+ new_frag = nfp_net_napi_alloc_one(dp, &new_dma_addr);
+ if (unlikely(!new_frag)) {
+ nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, skb);
+ return true;
+ }
+
+ nfp_net_dma_unmap_rx(dp, rxbuf->dma_addr);
+
+ nfp_net_rx_give_one(dp, rx_ring, new_frag, new_dma_addr);
+
+ skb_reserve(skb, pkt_off);
+ skb_put(skb, pkt_len);
+
+ nfp_app_ctrl_rx(nn->app, skb);
+
+ return true;
+}
+
+static void nfp_ctrl_rx(struct nfp_net_r_vector *r_vec)
+{
+ struct nfp_net_rx_ring *rx_ring = r_vec->rx_ring;
+ struct nfp_net *nn = r_vec->nfp_net;
+ struct nfp_net_dp *dp = &nn->dp;
+
+ while (nfp_ctrl_rx_one(nn, dp, r_vec, rx_ring))
+ continue;
+}
+
+static void nfp_ctrl_poll(unsigned long arg)
+{
+ struct nfp_net_r_vector *r_vec = (void *)arg;
+
+ spin_lock_bh(&r_vec->lock);
+ nfp_net_tx_complete(r_vec->tx_ring);
+ __nfp_ctrl_tx_queued(r_vec);
+ spin_unlock_bh(&r_vec->lock);
+
+ nfp_ctrl_rx(r_vec);
+
+ nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
+}
+
/* Setup and Configuration
*/
/**
+ * nfp_net_vecs_init() - Assign IRQs and setup rvecs.
+ * @nn: NFP Network structure
+ */
+static void nfp_net_vecs_init(struct nfp_net *nn)
+{
+ struct nfp_net_r_vector *r_vec;
+ int r;
+
+ nn->lsc_handler = nfp_net_irq_lsc;
+ nn->exn_handler = nfp_net_irq_exn;
+
+ for (r = 0; r < nn->max_r_vecs; r++) {
+ struct msix_entry *entry;
+
+ entry = &nn->irq_entries[NFP_NET_NON_Q_VECTORS + r];
+
+ r_vec = &nn->r_vecs[r];
+ r_vec->nfp_net = nn;
+ r_vec->irq_entry = entry->entry;
+ r_vec->irq_vector = entry->vector;
+
+ if (nn->dp.netdev) {
+ r_vec->handler = nfp_net_irq_rxtx;
+ } else {
+ r_vec->handler = nfp_ctrl_irq_rxtx;
+
+ __skb_queue_head_init(&r_vec->queue);
+ spin_lock_init(&r_vec->lock);
+ tasklet_init(&r_vec->tasklet, nfp_ctrl_poll,
+ (unsigned long)r_vec);
+ tasklet_disable(&r_vec->tasklet);
+ }
+
+ cpumask_set_cpu(r, &r_vec->affinity_mask);
+ }
+}
+
+/**
* nfp_net_tx_ring_free() - Free resources allocated to a TX ring
* @tx_ring: TX ring to free
*/
@@ -1803,7 +2138,7 @@ nfp_net_tx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
if (!tx_ring->txbufs)
goto err_alloc;
- if (!tx_ring->is_xdp)
+ if (!tx_ring->is_xdp && dp->netdev)
netif_set_xps_queue(dp->netdev, &r_vec->affinity_mask,
tx_ring->idx);
@@ -2017,15 +2352,22 @@ nfp_net_prepare_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
int err;
/* Setup NAPI */
- netif_napi_add(nn->dp.netdev, &r_vec->napi,
- nfp_net_poll, NAPI_POLL_WEIGHT);
+ if (nn->dp.netdev)
+ netif_napi_add(nn->dp.netdev, &r_vec->napi,
+ nfp_net_poll, NAPI_POLL_WEIGHT);
+ else
+ tasklet_enable(&r_vec->tasklet);
snprintf(r_vec->name, sizeof(r_vec->name),
- "%s-rxtx-%d", nn->dp.netdev->name, idx);
+ "%s-rxtx-%d", nfp_net_name(nn), idx);
err = request_irq(r_vec->irq_vector, r_vec->handler, 0, r_vec->name,
r_vec);
if (err) {
- netif_napi_del(&r_vec->napi);
+ if (nn->dp.netdev)
+ netif_napi_del(&r_vec->napi);
+ else
+ tasklet_disable(&r_vec->tasklet);
+
nn_err(nn, "Error requesting IRQ %d\n", r_vec->irq_vector);
return err;
}
@@ -2043,7 +2385,11 @@ static void
nfp_net_cleanup_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec)
{
irq_set_affinity_hint(r_vec->irq_vector, NULL);
- netif_napi_del(&r_vec->napi);
+ if (nn->dp.netdev)
+ netif_napi_del(&r_vec->napi);
+ else
+ tasklet_disable(&r_vec->tasklet);
+
free_irq(r_vec->irq_vector, r_vec);
}
@@ -2105,17 +2451,16 @@ void nfp_net_coalesce_write_cfg(struct nfp_net *nn)
/**
* nfp_net_write_mac_addr() - Write mac address to the device control BAR
* @nn: NFP Net device to reconfigure
+ * @addr: MAC address to write
*
* Writes the given MAC address to the device control BAR. Does not
* perform the required reconfig. We do a bit of a byte swapping dance
* because firmware is LE.
*/
-static void nfp_net_write_mac_addr(struct nfp_net *nn)
+static void nfp_net_write_mac_addr(struct nfp_net *nn, const u8 *addr)
{
- nn_writel(nn, NFP_NET_CFG_MACADDR + 0,
- get_unaligned_be32(nn->dp.netdev->dev_addr));
- nn_writew(nn, NFP_NET_CFG_MACADDR + 6,
- get_unaligned_be16(nn->dp.netdev->dev_addr + 4));
+ nn_writel(nn, NFP_NET_CFG_MACADDR + 0, get_unaligned_be32(addr));
+ nn_writew(nn, NFP_NET_CFG_MACADDR + 6, get_unaligned_be16(addr + 4));
}
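The two writes match the 8-byte MACADDR register window: one 32-bit word holding the first four octets at offset 0 and one 16-bit word holding the last two at offset 6, with bytes 4-5 as padding. A sketch of how the bytes land, assuming a plain little-endian register file as the comment above describes:

/* Packing sketch for the MACADDR window: be32 value stored LE at
 * offset 0, be16 value stored LE at offset 6, two pad bytes between.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t be32(const uint8_t *p)
{
	return (uint32_t)p[0] << 24 | (uint32_t)p[1] << 16 |
	       (uint32_t)p[2] << 8 | p[3];
}

static void writel_le(uint8_t *bar, uint32_t v)	/* LE store, like the BAR */
{
	bar[0] = v; bar[1] = v >> 8; bar[2] = v >> 16; bar[3] = v >> 24;
}

static void writew_le(uint8_t *bar, uint16_t v)
{
	bar[0] = v; bar[1] = v >> 8;
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x15, 0x4d, 0x12, 0x34, 0x56 };
	uint8_t bar[8] = { 0 };

	writel_le(bar + 0, be32(mac));			   /* nn_writel(+0) */
	writew_le(bar + 6, (uint16_t)(mac[4] << 8 | mac[5])); /* nn_writew(+6) */

	for (int i = 0; i < 8; i++)
		printf("%02x ", bar[i]);	/* 12 4d 15 00 00 00 56 34 */
	printf("\n");
	return 0;
}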
static void nfp_net_vec_clear_ring_data(struct nfp_net *nn, unsigned int idx)
@@ -2197,17 +2542,15 @@ static int nfp_net_set_config_and_enable(struct nfp_net *nn)
new_ctrl = nn->dp.ctrl;
- if (nn->cap & NFP_NET_CFG_CTRL_RSS) {
+ if (nn->dp.ctrl & NFP_NET_CFG_CTRL_RSS_ANY) {
nfp_net_rss_write_key(nn);
nfp_net_rss_write_itbl(nn);
nn_writel(nn, NFP_NET_CFG_RSS_CTRL, nn->rss_cfg);
update |= NFP_NET_CFG_UPDATE_RSS;
}
- if (nn->cap & NFP_NET_CFG_CTRL_IRQMOD) {
+ if (nn->dp.ctrl & NFP_NET_CFG_CTRL_IRQMOD) {
nfp_net_coalesce_write_cfg(nn);
-
- new_ctrl |= NFP_NET_CFG_CTRL_IRQMOD;
update |= NFP_NET_CFG_UPDATE_IRQMOD;
}
@@ -2222,9 +2565,10 @@ static int nfp_net_set_config_and_enable(struct nfp_net *nn)
nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, nn->dp.num_rx_rings == 64 ?
0xffffffffffffffffULL : ((u64)1 << nn->dp.num_rx_rings) - 1);
- nfp_net_write_mac_addr(nn);
+ if (nn->dp.netdev)
+ nfp_net_write_mac_addr(nn, nn->dp.netdev->dev_addr);
- nn_writel(nn, NFP_NET_CFG_MTU, nn->dp.netdev->mtu);
+ nn_writel(nn, NFP_NET_CFG_MTU, nn->dp.mtu);
bufsz = nn->dp.fl_bufsz - nn->dp.rx_dma_off - NFP_NET_RX_BUF_NON_DATA;
nn_writel(nn, NFP_NET_CFG_FLBUFSZ, bufsz);
@@ -2262,6 +2606,86 @@ static int nfp_net_set_config_and_enable(struct nfp_net *nn)
}
/**
+ * nfp_net_close_stack() - Quiesce the stack (part of close)
+ * @nn: NFP Net device to reconfigure
+ */
+static void nfp_net_close_stack(struct nfp_net *nn)
+{
+ unsigned int r;
+
+ disable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
+ netif_carrier_off(nn->dp.netdev);
+ nn->link_up = false;
+
+ for (r = 0; r < nn->dp.num_r_vecs; r++) {
+ disable_irq(nn->r_vecs[r].irq_vector);
+ napi_disable(&nn->r_vecs[r].napi);
+ }
+
+ netif_tx_disable(nn->dp.netdev);
+}
+
+/**
+ * nfp_net_close_free_all() - Free all runtime resources
+ * @nn: NFP Net device to reconfigure
+ */
+static void nfp_net_close_free_all(struct nfp_net *nn)
+{
+ unsigned int r;
+
+ nfp_net_tx_rings_free(&nn->dp);
+ nfp_net_rx_rings_free(&nn->dp);
+
+ for (r = 0; r < nn->dp.num_r_vecs; r++)
+ nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
+
+ nfp_net_aux_irq_free(nn, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
+ nfp_net_aux_irq_free(nn, NFP_NET_CFG_EXN, NFP_NET_IRQ_EXN_IDX);
+}
+
+/**
+ * nfp_net_netdev_close() - Called when the device is downed
+ * @netdev: netdev structure
+ */
+static int nfp_net_netdev_close(struct net_device *netdev)
+{
+ struct nfp_net *nn = netdev_priv(netdev);
+
+ /* Step 1: Disable RX and TX rings from the Linux kernel perspective
+ */
+ nfp_net_close_stack(nn);
+
+ /* Step 2: Tell NFP
+ */
+ nfp_net_clear_config_and_disable(nn);
+
+ /* Step 3: Free resources
+ */
+ nfp_net_close_free_all(nn);
+
+ nn_dbg(nn, "%s down\n", netdev->name);
+ return 0;
+}
+
+void nfp_ctrl_close(struct nfp_net *nn)
+{
+ int r;
+
+ rtnl_lock();
+
+ for (r = 0; r < nn->dp.num_r_vecs; r++) {
+ disable_irq(nn->r_vecs[r].irq_vector);
+ tasklet_disable(&nn->r_vecs[r].tasklet);
+ }
+
+ nfp_net_clear_config_and_disable(nn);
+
+ nfp_net_close_free_all(nn);
+
+ rtnl_unlock();
+}
+
+/**
* nfp_net_open_stack() - Start the device from stack's perspective
* @nn: NFP Net device to reconfigure
*/
@@ -2280,16 +2704,10 @@ static void nfp_net_open_stack(struct nfp_net *nn)
nfp_net_read_link_status(nn);
}
-static int nfp_net_netdev_open(struct net_device *netdev)
+static int nfp_net_open_alloc_all(struct nfp_net *nn)
{
- struct nfp_net *nn = netdev_priv(netdev);
int err, r;
- /* Step 1: Allocate resources for rings and the like
- * - Request interrupts
- * - Allocate RX and TX ring resources
- * - Setup initial RSS table
- */
err = nfp_net_aux_irq_request(nn, NFP_NET_CFG_EXN, "%s-exn",
nn->exn_name, sizeof(nn->exn_name),
NFP_NET_IRQ_EXN_IDX, nn->exn_handler);
@@ -2319,13 +2737,42 @@ static int nfp_net_netdev_open(struct net_device *netdev)
for (r = 0; r < nn->max_r_vecs; r++)
nfp_net_vector_assign_rings(&nn->dp, &nn->r_vecs[r], r);
+ return 0;
+
+err_free_rx_rings:
+ nfp_net_rx_rings_free(&nn->dp);
+err_cleanup_vec:
+ r = nn->dp.num_r_vecs;
+err_cleanup_vec_p:
+ while (r--)
+ nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
+ nfp_net_aux_irq_free(nn, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
+err_free_exn:
+ nfp_net_aux_irq_free(nn, NFP_NET_CFG_EXN, NFP_NET_IRQ_EXN_IDX);
+ return err;
+}
+
+static int nfp_net_netdev_open(struct net_device *netdev)
+{
+ struct nfp_net *nn = netdev_priv(netdev);
+ int err;
+
+ /* Step 1: Allocate resources for rings and the like
+ * - Request interrupts
+ * - Allocate RX and TX ring resources
+ * - Setup initial RSS table
+ */
+ err = nfp_net_open_alloc_all(nn);
+ if (err)
+ return err;
+
err = netif_set_real_num_tx_queues(netdev, nn->dp.num_stack_tx_rings);
if (err)
- goto err_free_rings;
+ goto err_free_all;
err = netif_set_real_num_rx_queues(netdev, nn->dp.num_rx_rings);
if (err)
- goto err_free_rings;
+ goto err_free_all;
/* Step 2: Configure the NFP
* - Enable rings from 0 to tx_rings/rx_rings - 1.
@@ -2336,7 +2783,7 @@ static int nfp_net_netdev_open(struct net_device *netdev)
*/
err = nfp_net_set_config_and_enable(nn);
if (err)
- goto err_free_rings;
+ goto err_free_all;
/* Step 3: Enable for kernel
* - put some freelist descriptors on each RX ring
@@ -2348,89 +2795,38 @@ static int nfp_net_netdev_open(struct net_device *netdev)
return 0;
-err_free_rings:
- nfp_net_tx_rings_free(&nn->dp);
-err_free_rx_rings:
- nfp_net_rx_rings_free(&nn->dp);
-err_cleanup_vec:
- r = nn->dp.num_r_vecs;
-err_cleanup_vec_p:
- while (r--)
- nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
- nfp_net_aux_irq_free(nn, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
-err_free_exn:
- nfp_net_aux_irq_free(nn, NFP_NET_CFG_EXN, NFP_NET_IRQ_EXN_IDX);
+err_free_all:
+ nfp_net_close_free_all(nn);
return err;
}
-/**
- * nfp_net_close_stack() - Quiescent the stack (part of close)
- * @nn: NFP Net device to reconfigure
- */
-static void nfp_net_close_stack(struct nfp_net *nn)
+int nfp_ctrl_open(struct nfp_net *nn)
{
- unsigned int r;
-
- disable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
- netif_carrier_off(nn->dp.netdev);
- nn->link_up = false;
+ int err, r;
- for (r = 0; r < nn->dp.num_r_vecs; r++) {
- disable_irq(nn->r_vecs[r].irq_vector);
- napi_disable(&nn->r_vecs[r].napi);
- }
+ /* ring dumping depends on vNICs being opened/closed under rtnl */
+ rtnl_lock();
- netif_tx_disable(nn->dp.netdev);
-}
+ err = nfp_net_open_alloc_all(nn);
+ if (err)
+ goto err_unlock;
-/**
- * nfp_net_close_free_all() - Free all runtime resources
- * @nn: NFP Net device to reconfigure
- */
-static void nfp_net_close_free_all(struct nfp_net *nn)
-{
- unsigned int r;
+ err = nfp_net_set_config_and_enable(nn);
+ if (err)
+ goto err_free_all;
- for (r = 0; r < nn->dp.num_rx_rings; r++) {
- nfp_net_rx_ring_bufs_free(&nn->dp, &nn->dp.rx_rings[r]);
- nfp_net_rx_ring_free(&nn->dp.rx_rings[r]);
- }
- for (r = 0; r < nn->dp.num_tx_rings; r++) {
- nfp_net_tx_ring_bufs_free(&nn->dp, &nn->dp.tx_rings[r]);
- nfp_net_tx_ring_free(&nn->dp.tx_rings[r]);
- }
for (r = 0; r < nn->dp.num_r_vecs; r++)
- nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
-
- kfree(nn->dp.rx_rings);
- kfree(nn->dp.tx_rings);
-
- nfp_net_aux_irq_free(nn, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
- nfp_net_aux_irq_free(nn, NFP_NET_CFG_EXN, NFP_NET_IRQ_EXN_IDX);
-}
-
-/**
- * nfp_net_netdev_close() - Called when the device is downed
- * @netdev: netdev structure
- */
-static int nfp_net_netdev_close(struct net_device *netdev)
-{
- struct nfp_net *nn = netdev_priv(netdev);
+ enable_irq(nn->r_vecs[r].irq_vector);
- /* Step 1: Disable RX and TX rings from the Linux kernel perspective
- */
- nfp_net_close_stack(nn);
+ rtnl_unlock();
- /* Step 2: Tell NFP
- */
- nfp_net_clear_config_and_disable(nn);
+ return 0;
- /* Step 3: Free resources
- */
+err_free_all:
nfp_net_close_free_all(nn);
-
- nn_dbg(nn, "%s down", netdev->name);
- return 0;
+err_unlock:
+ rtnl_unlock();
+ return err;
}
static void nfp_net_set_rx_mode(struct net_device *netdev)
@@ -2634,6 +3030,40 @@ static int nfp_net_change_mtu(struct net_device *netdev, int new_mtu)
return nfp_net_ring_reconfig(nn, dp, NULL);
}
+static int
+nfp_net_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
+{
+ struct nfp_net *nn = netdev_priv(netdev);
+
+ /* Priority tagged packets with vlan id 0 are processed by the
+ * NFP as untagged packets
+ */
+ if (!vid)
+ return 0;
+
+ nn_writew(nn, NFP_NET_CFG_VLAN_FILTER_VID, vid);
+ nn_writew(nn, NFP_NET_CFG_VLAN_FILTER_PROTO, ETH_P_8021Q);
+
+ return nfp_net_reconfig_mbox(nn, NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_ADD);
+}
+
+static int
+nfp_net_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
+{
+ struct nfp_net *nn = netdev_priv(netdev);
+
+ /* Priority tagged packets with vlan id 0 are processed by the
+ * NFP as untagged packets
+ */
+ if (!vid)
+ return 0;
+
+ nn_writew(nn, NFP_NET_CFG_VLAN_FILTER_VID, vid);
+ nn_writew(nn, NFP_NET_CFG_VLAN_FILTER_PROTO, ETH_P_8021Q);
+
+ return nfp_net_reconfig_mbox(nn, NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_KILL);
+}
+
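Both ndo hooks share one shape: stage the VID and TPID in the config BAR, then fire the matching CTAG_FILTER mailbox command via the helper added earlier; VID 0 is skipped since the NFP handles priority-tagged frames as untagged. A compact model of that shape (register slots and command ids are illustrative stand-ins for the NFP_NET_CFG_* definitions):

/* Shared shape of the two VLAN filter hooks: stage VID and protocol,
 * then issue the add/kill mailbox command.
 */
#include <stdint.h>
#include <stdio.h>

enum { VLAN_VID = 0, VLAN_PROTO = 1 };
enum { CMD_CTAG_ADD = 1, CMD_CTAG_KILL = 2 };	/* ids assumed */

static uint16_t bar16[2];

static int mbox(unsigned int cmd)
{
	printf("mbox cmd %u: vid %u proto 0x%04x\n",
	       cmd, bar16[VLAN_VID], bar16[VLAN_PROTO]);
	return 0;
}

static int vlan_filter(uint16_t vid, int add)
{
	if (!vid)		/* priority-tagged == untagged on the NFP */
		return 0;

	bar16[VLAN_VID] = vid;
	bar16[VLAN_PROTO] = 0x8100;	/* ETH_P_8021Q */
	return mbox(add ? CMD_CTAG_ADD : CMD_CTAG_KILL);
}

int main(void)
{
	vlan_filter(100, 1);	/* add VID 100 */
	vlan_filter(0, 1);	/* no-op by design */
	return 0;
}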
static void nfp_net_stat64(struct net_device *netdev,
struct rtnl_link_stats64 *stats)
{
@@ -2667,35 +3097,6 @@ static void nfp_net_stat64(struct net_device *netdev,
}
}
-static bool nfp_net_ebpf_capable(struct nfp_net *nn)
-{
- if (nn->cap & NFP_NET_CFG_CTRL_BPF &&
- nn_readb(nn, NFP_NET_CFG_BPF_ABI) == NFP_NET_BPF_ABI)
- return true;
- return false;
-}
-
-static int
-nfp_net_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
- struct tc_to_netdev *tc)
-{
- struct nfp_net *nn = netdev_priv(netdev);
-
- if (TC_H_MAJ(handle) != TC_H_MAJ(TC_H_INGRESS))
- return -EOPNOTSUPP;
- if (proto != htons(ETH_P_ALL))
- return -EOPNOTSUPP;
-
- if (tc->type == TC_SETUP_CLSBPF && nfp_net_ebpf_capable(nn)) {
- if (!nn->dp.bpf_offload_xdp)
- return nfp_net_bpf_offload(nn, tc->cls_bpf);
- else
- return -EBUSY;
- }
-
- return -EINVAL;
-}
-
static int nfp_net_set_features(struct net_device *netdev,
netdev_features_t features)
{
@@ -2710,9 +3111,9 @@ static int nfp_net_set_features(struct net_device *netdev,
if (changed & NETIF_F_RXCSUM) {
if (features & NETIF_F_RXCSUM)
- new_ctrl |= NFP_NET_CFG_CTRL_RXCSUM;
+ new_ctrl |= nn->cap & NFP_NET_CFG_CTRL_RXCSUM_ANY;
else
- new_ctrl &= ~NFP_NET_CFG_CTRL_RXCSUM;
+ new_ctrl &= ~NFP_NET_CFG_CTRL_RXCSUM_ANY;
}
if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
@@ -2724,9 +3125,10 @@ static int nfp_net_set_features(struct net_device *netdev,
if (changed & (NETIF_F_TSO | NETIF_F_TSO6)) {
if (features & (NETIF_F_TSO | NETIF_F_TSO6))
- new_ctrl |= NFP_NET_CFG_CTRL_LSO;
+ new_ctrl |= nn->cap & NFP_NET_CFG_CTRL_LSO2 ?:
+ NFP_NET_CFG_CTRL_LSO;
else
- new_ctrl &= ~NFP_NET_CFG_CTRL_LSO;
+ new_ctrl &= ~NFP_NET_CFG_CTRL_LSO_ANY;
}
if (changed & NETIF_F_HW_VLAN_CTAG_RX) {
@@ -2743,6 +3145,13 @@ static int nfp_net_set_features(struct net_device *netdev,
new_ctrl &= ~NFP_NET_CFG_CTRL_TXVLAN;
}
+ if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
+ if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
+ new_ctrl |= NFP_NET_CFG_CTRL_CTAG_FILTER;
+ else
+ new_ctrl &= ~NFP_NET_CFG_CTRL_CTAG_FILTER;
+ }
+
if (changed & NETIF_F_SG) {
if (features & NETIF_F_SG)
new_ctrl |= NFP_NET_CFG_CTRL_GATHER;
@@ -2750,7 +3159,7 @@ static int nfp_net_set_features(struct net_device *netdev,
new_ctrl &= ~NFP_NET_CFG_CTRL_GATHER;
}
- if (changed & NETIF_F_HW_TC && nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF) {
+ if (changed & NETIF_F_HW_TC && nfp_app_tc_busy(nn->app, nn)) {
nn_err(nn, "Cannot disable HW TC offload while in use\n");
return -EBUSY;
}
@@ -2818,26 +3227,6 @@ nfp_net_features_check(struct sk_buff *skb, struct net_device *dev,
return features;
}
-static int
-nfp_net_get_phys_port_name(struct net_device *netdev, char *name, size_t len)
-{
- struct nfp_net *nn = netdev_priv(netdev);
- int err;
-
- if (!nn->eth_port)
- return -EOPNOTSUPP;
-
- if (!nn->eth_port->is_split)
- err = snprintf(name, len, "p%d", nn->eth_port->label_port);
- else
- err = snprintf(name, len, "p%ds%d", nn->eth_port->label_port,
- nn->eth_port->label_subport);
- if (err >= len)
- return -EINVAL;
-
- return 0;
-}
-
/**
* nfp_net_set_vxlan_port() - set vxlan port in SW and reconfigure HW
* @nn: NFP Net device to reconfigure
@@ -2919,47 +3308,14 @@ static void nfp_net_del_vxlan_port(struct net_device *netdev,
nfp_net_set_vxlan_port(nn, idx, 0);
}
-static int nfp_net_xdp_offload(struct nfp_net *nn, struct bpf_prog *prog)
-{
- struct tc_cls_bpf_offload cmd = {
- .prog = prog,
- };
- int ret;
-
- if (!nfp_net_ebpf_capable(nn))
- return -EINVAL;
-
- if (nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF) {
- if (!nn->dp.bpf_offload_xdp)
- return prog ? -EBUSY : 0;
- cmd.command = prog ? TC_CLSBPF_REPLACE : TC_CLSBPF_DESTROY;
- } else {
- if (!prog)
- return 0;
- cmd.command = TC_CLSBPF_ADD;
- }
-
- ret = nfp_net_bpf_offload(nn, &cmd);
- /* Stop offload if replace not possible */
- if (ret && cmd.command == TC_CLSBPF_REPLACE)
- nfp_net_xdp_offload(nn, NULL);
- nn->dp.bpf_offload_xdp = prog && !ret;
- return ret;
-}
-
-static int nfp_net_xdp_setup(struct nfp_net *nn, struct netdev_xdp *xdp)
+static int
+nfp_net_xdp_setup_drv(struct nfp_net *nn, struct bpf_prog *prog,
+ struct netlink_ext_ack *extack)
{
- struct bpf_prog *old_prog = nn->dp.xdp_prog;
- struct bpf_prog *prog = xdp->prog;
struct nfp_net_dp *dp;
- int err;
- if (!prog && !nn->dp.xdp_prog)
- return 0;
- if (prog && nn->dp.xdp_prog) {
- prog = xchg(&nn->dp.xdp_prog, prog);
- bpf_prog_put(prog);
- nfp_net_xdp_offload(nn, nn->dp.xdp_prog);
+ if (!prog == !nn->dp.xdp_prog) {
+ WRITE_ONCE(nn->dp.xdp_prog, prog);
return 0;
}
@@ -2973,14 +3329,37 @@ static int nfp_net_xdp_setup(struct nfp_net *nn, struct netdev_xdp *xdp)
dp->rx_dma_off = prog ? XDP_PACKET_HEADROOM - nn->dp.rx_offset : 0;
/* We need RX reconfig to remap the buffers (BIDIR vs FROM_DEV) */
- err = nfp_net_ring_reconfig(nn, dp, xdp->extack);
+ return nfp_net_ring_reconfig(nn, dp, extack);
+}
+
+static int
+nfp_net_xdp_setup(struct nfp_net *nn, struct bpf_prog *prog, u32 flags,
+ struct netlink_ext_ack *extack)
+{
+ struct bpf_prog *drv_prog, *offload_prog;
+ int err;
+
+ if (nn->xdp_prog && (flags ^ nn->xdp_flags) & XDP_FLAGS_MODES)
+ return -EBUSY;
+
+ /* Load both when no flags set to allow easy activation of driver path
+ * when program is replaced by one which can't be offloaded.
+ */
+ drv_prog = flags & XDP_FLAGS_HW_MODE ? NULL : prog;
+ offload_prog = flags & XDP_FLAGS_DRV_MODE ? NULL : prog;
+
+ err = nfp_net_xdp_setup_drv(nn, drv_prog, extack);
if (err)
return err;
- if (old_prog)
- bpf_prog_put(old_prog);
+ err = nfp_app_xdp_offload(nn->app, nn, offload_prog);
+ if (err && flags & XDP_FLAGS_HW_MODE)
+ return err;
- nfp_net_xdp_offload(nn, nn->dp.xdp_prog);
+ if (nn->xdp_prog)
+ bpf_prog_put(nn->xdp_prog);
+ nn->xdp_prog = prog;
+ nn->xdp_flags = flags;
return 0;
}
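nfp_net_xdp_setup() derives which program copies to install from the netlink flags: HW mode skips the driver datapath, DRV mode skips the offload, and no mode flag installs both so a later replacement by a non-offloadable program can fall back to the driver path without disruption. The selection as a pure function (the XDP_FLAGS_* values mirror the uapi of this era and should be treated as illustrative):

/* Mode selection from nfp_net_xdp_setup() as a pure function. */
#include <stdbool.h>
#include <stdio.h>

#define XDP_FLAGS_DRV_MODE	(1u << 2)
#define XDP_FLAGS_HW_MODE	(1u << 3)

struct progs { bool drv, offload; };

static struct progs pick(unsigned int flags)
{
	struct progs p;

	p.drv = !(flags & XDP_FLAGS_HW_MODE);
	p.offload = !(flags & XDP_FLAGS_DRV_MODE);
	return p;
}

int main(void)
{
	static const unsigned int cases[] = {
		0, XDP_FLAGS_DRV_MODE, XDP_FLAGS_HW_MODE,
	};

	for (unsigned int i = 0; i < 3; i++) {
		struct progs p = pick(cases[i]);

		printf("flags %#x: drv=%d offload=%d\n",
		       cases[i], p.drv, p.offload);
	}
	return 0;
}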
@@ -2991,28 +3370,56 @@ static int nfp_net_xdp(struct net_device *netdev, struct netdev_xdp *xdp)
switch (xdp->command) {
case XDP_SETUP_PROG:
- return nfp_net_xdp_setup(nn, xdp);
+ case XDP_SETUP_PROG_HW:
+ return nfp_net_xdp_setup(nn, xdp->prog, xdp->flags,
+ xdp->extack);
case XDP_QUERY_PROG:
- xdp->prog_attached = !!nn->dp.xdp_prog;
+ xdp->prog_attached = !!nn->xdp_prog;
+ if (nn->dp.bpf_offload_xdp)
+ xdp->prog_attached = XDP_ATTACHED_HW;
+ xdp->prog_id = nn->xdp_prog ? nn->xdp_prog->aux->id : 0;
return 0;
default:
return -EINVAL;
}
}
-static const struct net_device_ops nfp_net_netdev_ops = {
+static int nfp_net_set_mac_address(struct net_device *netdev, void *addr)
+{
+ struct nfp_net *nn = netdev_priv(netdev);
+ struct sockaddr *saddr = addr;
+ int err;
+
+ err = eth_prepare_mac_addr_change(netdev, addr);
+ if (err)
+ return err;
+
+ nfp_net_write_mac_addr(nn, saddr->sa_data);
+
+ err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_MACADDR);
+ if (err)
+ return err;
+
+ eth_commit_mac_addr_change(netdev, addr);
+
+ return 0;
+}
+
+const struct net_device_ops nfp_net_netdev_ops = {
.ndo_open = nfp_net_netdev_open,
.ndo_stop = nfp_net_netdev_close,
.ndo_start_xmit = nfp_net_tx,
.ndo_get_stats64 = nfp_net_stat64,
- .ndo_setup_tc = nfp_net_setup_tc,
+ .ndo_vlan_rx_add_vid = nfp_net_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = nfp_net_vlan_rx_kill_vid,
+ .ndo_setup_tc = nfp_port_setup_tc,
.ndo_tx_timeout = nfp_net_tx_timeout,
.ndo_set_rx_mode = nfp_net_set_rx_mode,
.ndo_change_mtu = nfp_net_change_mtu,
- .ndo_set_mac_address = eth_mac_addr,
+ .ndo_set_mac_address = nfp_net_set_mac_address,
.ndo_set_features = nfp_net_set_features,
.ndo_features_check = nfp_net_features_check,
- .ndo_get_phys_port_name = nfp_net_get_phys_port_name,
+ .ndo_get_phys_port_name = nfp_port_get_phys_port_name,
.ndo_udp_tunnel_add = nfp_net_add_vxlan_port,
.ndo_udp_tunnel_del = nfp_net_del_vxlan_port,
.ndo_xdp = nfp_net_xdp,
@@ -3032,7 +3439,7 @@ void nfp_net_info(struct nfp_net *nn)
nn->fw_ver.resv, nn->fw_ver.class,
nn->fw_ver.major, nn->fw_ver.minor,
nn->max_mtu);
- nn_info(nn, "CAP: %#x %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
+ nn_info(nn, "CAP: %#x %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
nn->cap,
nn->cap & NFP_NET_CFG_CTRL_PROMISC ? "PROMISC " : "",
nn->cap & NFP_NET_CFG_CTRL_L2BC ? "L2BCFILT " : "",
@@ -3043,43 +3450,58 @@ void nfp_net_info(struct nfp_net *nn)
nn->cap & NFP_NET_CFG_CTRL_TXVLAN ? "TXVLAN " : "",
nn->cap & NFP_NET_CFG_CTRL_SCATTER ? "SCATTER " : "",
nn->cap & NFP_NET_CFG_CTRL_GATHER ? "GATHER " : "",
- nn->cap & NFP_NET_CFG_CTRL_LSO ? "TSO " : "",
- nn->cap & NFP_NET_CFG_CTRL_RSS ? "RSS " : "",
+ nn->cap & NFP_NET_CFG_CTRL_LSO ? "TSO1 " : "",
+ nn->cap & NFP_NET_CFG_CTRL_LSO2 ? "TSO2 " : "",
+ nn->cap & NFP_NET_CFG_CTRL_RSS ? "RSS1 " : "",
+ nn->cap & NFP_NET_CFG_CTRL_RSS2 ? "RSS2 " : "",
+ nn->cap & NFP_NET_CFG_CTRL_CTAG_FILTER ? "CTAG_FILTER " : "",
nn->cap & NFP_NET_CFG_CTRL_L2SWITCH ? "L2SWITCH " : "",
nn->cap & NFP_NET_CFG_CTRL_MSIXAUTO ? "AUTOMASK " : "",
nn->cap & NFP_NET_CFG_CTRL_IRQMOD ? "IRQMOD " : "",
nn->cap & NFP_NET_CFG_CTRL_VXLAN ? "VXLAN " : "",
nn->cap & NFP_NET_CFG_CTRL_NVGRE ? "NVGRE " : "",
- nfp_net_ebpf_capable(nn) ? "BPF " : "");
+ nn->cap & NFP_NET_CFG_CTRL_CSUM_COMPLETE ?
+ "RXCSUM_COMPLETE " : "",
+ nn->cap & NFP_NET_CFG_CTRL_LIVE_ADDR ? "LIVE_ADDR " : "",
+ nfp_app_extra_cap(nn->app, nn));
}
/**
- * nfp_net_netdev_alloc() - Allocate netdev and related structure
+ * nfp_net_alloc() - Allocate netdev and related structure
* @pdev: PCI device
+ * @needs_netdev: Whether to allocate a netdev for this vNIC
* @max_tx_rings: Maximum number of TX rings supported by device
* @max_rx_rings: Maximum number of RX rings supported by device
*
* This function allocates a netdev device and fills in the initial
- * part of the @struct nfp_net structure.
+ * part of the @struct nfp_net structure. In the case of a control
+ * device the nfp_net structure is allocated without the netdev.
*
* Return: NFP Net device structure, or ERR_PTR on error.
*/
-struct nfp_net *nfp_net_netdev_alloc(struct pci_dev *pdev,
- unsigned int max_tx_rings,
- unsigned int max_rx_rings)
+struct nfp_net *nfp_net_alloc(struct pci_dev *pdev, bool needs_netdev,
+ unsigned int max_tx_rings,
+ unsigned int max_rx_rings)
{
- struct net_device *netdev;
struct nfp_net *nn;
- netdev = alloc_etherdev_mqs(sizeof(struct nfp_net),
- max_tx_rings, max_rx_rings);
- if (!netdev)
- return ERR_PTR(-ENOMEM);
+ if (needs_netdev) {
+ struct net_device *netdev;
- SET_NETDEV_DEV(netdev, &pdev->dev);
- nn = netdev_priv(netdev);
+ netdev = alloc_etherdev_mqs(sizeof(struct nfp_net),
+ max_tx_rings, max_rx_rings);
+ if (!netdev)
+ return ERR_PTR(-ENOMEM);
+
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+ nn = netdev_priv(netdev);
+ nn->dp.netdev = netdev;
+ } else {
+ nn = vzalloc(sizeof(*nn));
+ if (!nn)
+ return ERR_PTR(-ENOMEM);
+ }
- nn->dp.netdev = netdev;
nn->dp.dev = &pdev->dev;
nn->pdev = pdev;
@@ -3099,24 +3521,27 @@ struct nfp_net *nfp_net_netdev_alloc(struct pci_dev *pdev,
nn->dp.rxd_cnt = NFP_NET_RX_DESCS_DEFAULT;
spin_lock_init(&nn->reconfig_lock);
- spin_lock_init(&nn->rx_filter_lock);
spin_lock_init(&nn->link_status_lock);
setup_timer(&nn->reconfig_timer,
nfp_net_reconfig_timer, (unsigned long)nn);
- setup_timer(&nn->rx_filter_stats_timer,
- nfp_net_filter_stats_timer, (unsigned long)nn);
return nn;
}
/**
- * nfp_net_netdev_free() - Undo what @nfp_net_netdev_alloc() did
+ * nfp_net_free() - Undo what @nfp_net_alloc() did
* @nn:      NFP Net device to free
*/
-void nfp_net_netdev_free(struct nfp_net *nn)
+void nfp_net_free(struct nfp_net *nn)
{
- free_netdev(nn->dp.netdev);
+ if (nn->xdp_prog)
+ bpf_prog_put(nn->xdp_prog);
+
+ if (nn->dp.netdev)
+ free_netdev(nn->dp.netdev);
+ else
+ vfree(nn);
}
/**
@@ -3187,48 +3612,13 @@ static void nfp_net_irqmod_init(struct nfp_net *nn)
nn->tx_coalesce_max_frames = 64;
}
-/**
- * nfp_net_netdev_init() - Initialise/finalise the netdev structure
- * @netdev: netdev structure
- *
- * Return: 0 on success or negative errno on error.
- */
-int nfp_net_netdev_init(struct net_device *netdev)
+static void nfp_net_netdev_init(struct nfp_net *nn)
{
- struct nfp_net *nn = netdev_priv(netdev);
- int err;
-
- nn->dp.chained_metadata_format = nn->fw_ver.major > 3;
-
- nn->dp.rx_dma_dir = DMA_FROM_DEVICE;
-
- /* Get some of the read-only fields from the BAR */
- nn->cap = nn_readl(nn, NFP_NET_CFG_CAP);
- nn->max_mtu = nn_readl(nn, NFP_NET_CFG_MAX_MTU);
+ struct net_device *netdev = nn->dp.netdev;
- nfp_net_write_mac_addr(nn);
+ nfp_net_write_mac_addr(nn, nn->dp.netdev->dev_addr);
- /* Determine RX packet/metadata boundary offset */
- if (nn->fw_ver.major >= 2) {
- u32 reg;
-
- reg = nn_readl(nn, NFP_NET_CFG_RX_OFFSET);
- if (reg > NFP_NET_MAX_PREPEND) {
- nn_err(nn, "Invalid rx offset: %d\n", reg);
- return -EINVAL;
- }
- nn->dp.rx_offset = reg;
- } else {
- nn->dp.rx_offset = NFP_NET_RX_OFFSET;
- }
-
- /* Set default MTU and Freelist buffer size */
- if (nn->max_mtu < NFP_NET_DEFAULT_MTU)
- netdev->mtu = nn->max_mtu;
- else
- netdev->mtu = NFP_NET_DEFAULT_MTU;
- nn->dp.mtu = netdev->mtu;
- nn->dp.fl_bufsz = nfp_net_calc_fl_bufsz(&nn->dp);
+ netdev->mtu = nn->dp.mtu;
/* Advertise/enable offloads based on capabilities
*
@@ -3236,10 +3626,13 @@ int nfp_net_netdev_init(struct net_device *netdev)
* and netdev->hw_features advertises which features are
* supported. By default we enable most features.
*/
+ if (nn->cap & NFP_NET_CFG_CTRL_LIVE_ADDR)
+ netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+
netdev->hw_features = NETIF_F_HIGHDMA;
- if (nn->cap & NFP_NET_CFG_CTRL_RXCSUM) {
+ if (nn->cap & NFP_NET_CFG_CTRL_RXCSUM_ANY) {
netdev->hw_features |= NETIF_F_RXCSUM;
- nn->dp.ctrl |= NFP_NET_CFG_CTRL_RXCSUM;
+ nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_RXCSUM_ANY;
}
if (nn->cap & NFP_NET_CFG_CTRL_TXCSUM) {
netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
@@ -3249,15 +3642,14 @@ int nfp_net_netdev_init(struct net_device *netdev)
netdev->hw_features |= NETIF_F_SG;
nn->dp.ctrl |= NFP_NET_CFG_CTRL_GATHER;
}
- if ((nn->cap & NFP_NET_CFG_CTRL_LSO) && nn->fw_ver.major > 2) {
+ if ((nn->cap & NFP_NET_CFG_CTRL_LSO && nn->fw_ver.major > 2) ||
+ nn->cap & NFP_NET_CFG_CTRL_LSO2) {
netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
- nn->dp.ctrl |= NFP_NET_CFG_CTRL_LSO;
+ nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_LSO2 ?:
+ NFP_NET_CFG_CTRL_LSO;
}
- if (nn->cap & NFP_NET_CFG_CTRL_RSS) {
+ if (nn->cap & NFP_NET_CFG_CTRL_RSS_ANY)
netdev->hw_features |= NETIF_F_RXHASH;
- nfp_net_rss_init(nn);
- nn->dp.ctrl |= NFP_NET_CFG_CTRL_RSS;
- }
if (nn->cap & NFP_NET_CFG_CTRL_VXLAN &&
nn->cap & NFP_NET_CFG_CTRL_NVGRE) {
if (nn->cap & NFP_NET_CFG_CTRL_LSO)
@@ -3275,17 +3667,98 @@ int nfp_net_netdev_init(struct net_device *netdev)
nn->dp.ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
}
if (nn->cap & NFP_NET_CFG_CTRL_TXVLAN) {
- netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
- nn->dp.ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
+ if (nn->cap & NFP_NET_CFG_CTRL_LSO2) {
+ nn_warn(nn, "Device advertises both TSO2 and TXVLAN. Refusing to enable TXVLAN.\n");
+ } else {
+ netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
+ nn->dp.ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
+ }
+ }
+ if (nn->cap & NFP_NET_CFG_CTRL_CTAG_FILTER) {
+ netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+ nn->dp.ctrl |= NFP_NET_CFG_CTRL_CTAG_FILTER;
}
netdev->features = netdev->hw_features;
- if (nfp_net_ebpf_capable(nn))
+ if (nfp_app_has_tc(nn->app))
netdev->hw_features |= NETIF_F_HW_TC;
/* Advertise but disable TSO by default. */
netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
+ nn->dp.ctrl &= ~NFP_NET_CFG_CTRL_LSO_ANY;
+
+ /* Finalise the netdev setup */
+ netdev->netdev_ops = &nfp_net_netdev_ops;
+ netdev->watchdog_timeo = msecs_to_jiffies(5 * 1000);
+
+ SWITCHDEV_SET_OPS(netdev, &nfp_port_switchdev_ops);
+
+ /* MTU range: 68 - hw-specific max */
+ netdev->min_mtu = ETH_MIN_MTU;
+ netdev->max_mtu = nn->max_mtu;
+
+ netif_carrier_off(netdev);
+
+ nfp_net_set_ethtool_ops(netdev);
+}
+
+/**
+ * nfp_net_init() - Initialise/finalise the nfp_net structure
+ * @nn: NFP Net device structure
+ *
+ * Return: 0 on success or negative errno on error.
+ */
+int nfp_net_init(struct nfp_net *nn)
+{
+ int err;
+
+ nn->dp.rx_dma_dir = DMA_FROM_DEVICE;
+
+ /* Get some of the read-only fields from the BAR */
+ nn->cap = nn_readl(nn, NFP_NET_CFG_CAP);
+ nn->max_mtu = nn_readl(nn, NFP_NET_CFG_MAX_MTU);
+
+	/* ABI 4.x and the ctrl vNIC always use chained metadata; in other
+	 * cases we allow use of non-chained metadata if RSS(v1) is the only
+ * advertised capability requiring metadata.
+ */
+ nn->dp.chained_metadata_format = nn->fw_ver.major == 4 ||
+ !nn->dp.netdev ||
+ !(nn->cap & NFP_NET_CFG_CTRL_RSS) ||
+ nn->cap & NFP_NET_CFG_CTRL_CHAIN_META;
+ /* RSS(v1) uses non-chained metadata format, except in ABI 4.x where
+ * it has the same meaning as RSSv2.
+ */
+ if (nn->dp.chained_metadata_format && nn->fw_ver.major != 4)
+ nn->cap &= ~NFP_NET_CFG_CTRL_RSS;
+
+ /* Determine RX packet/metadata boundary offset */
+ if (nn->fw_ver.major >= 2) {
+ u32 reg;
+
+ reg = nn_readl(nn, NFP_NET_CFG_RX_OFFSET);
+ if (reg > NFP_NET_MAX_PREPEND) {
+			nn_err(nn, "Invalid rx offset: %u\n", reg);
+ return -EINVAL;
+ }
+ nn->dp.rx_offset = reg;
+ } else {
+ nn->dp.rx_offset = NFP_NET_RX_OFFSET;
+ }
+
+ /* Set default MTU and Freelist buffer size */
+ if (nn->max_mtu < NFP_NET_DEFAULT_MTU)
+ nn->dp.mtu = nn->max_mtu;
+ else
+ nn->dp.mtu = NFP_NET_DEFAULT_MTU;
+ nn->dp.fl_bufsz = nfp_net_calc_fl_bufsz(&nn->dp);
+
+ if (nn->cap & NFP_NET_CFG_CTRL_RSS_ANY) {
+ nfp_net_rss_init(nn);
+ nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_RSS2 ?:
+ NFP_NET_CFG_CTRL_RSS;
+ }
/* Allow L2 Broadcast and Multicast through by default, if supported */
if (nn->cap & NFP_NET_CFG_CTRL_L2BC)
@@ -3299,6 +3772,9 @@ int nfp_net_netdev_init(struct net_device *netdev)
nn->dp.ctrl |= NFP_NET_CFG_CTRL_IRQMOD;
}
+ if (nn->dp.netdev)
+ nfp_net_netdev_init(nn);
+
/* Stash the re-configuration queue away. First odd queue in TX Bar */
nn->qcp_cfg = nn->tx_bar + NFP_QCP_QUEUE_ADDR_SZ;
@@ -3311,34 +3787,21 @@ int nfp_net_netdev_init(struct net_device *netdev)
if (err)
return err;
- /* Finalise the netdev setup */
- netdev->netdev_ops = &nfp_net_netdev_ops;
- netdev->watchdog_timeo = msecs_to_jiffies(5 * 1000);
-
- /* MTU range: 68 - hw-specific max */
- netdev->min_mtu = ETH_MIN_MTU;
- netdev->max_mtu = nn->max_mtu;
-
- netif_carrier_off(netdev);
-
- nfp_net_set_ethtool_ops(netdev);
- nfp_net_vecs_init(netdev);
+ nfp_net_vecs_init(nn);
- return register_netdev(netdev);
+ if (!nn->dp.netdev)
+ return 0;
+ return register_netdev(nn->dp.netdev);
}
/**
- * nfp_net_netdev_clean() - Undo what nfp_net_netdev_init() did.
- * @netdev: netdev structure
+ * nfp_net_clean() - Undo what nfp_net_init() did.
+ * @nn: NFP Net device structure
*/
-void nfp_net_netdev_clean(struct net_device *netdev)
+void nfp_net_clean(struct nfp_net *nn)
{
- struct nfp_net *nn = netdev_priv(netdev);
+ if (!nn->dp.netdev)
+ return;
unregister_netdev(nn->dp.netdev);
-
- if (nn->dp.xdp_prog)
- bpf_prog_put(nn->dp.xdp_prog);
- if (nn->dp.bpf_offload_xdp)
- nfp_net_xdp_offload(nn, NULL);
}
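Both capability paths above lean on GCC's "a ?: b" extension to prefer the version-2 control bit and fall back to version 1. A plain-C sketch of the LSO case (illustrative only; the RSS2/RSS selection works the same way):

	/* Equivalent of
	 *   nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_LSO2 ?: NFP_NET_CFG_CTRL_LSO;
	 * LSO2 is a single bit, so a non-zero mask equals the constant.
	 */
	static u32 lso_ctrl_bit(u32 cap)
	{
		if (cap & NFP_NET_CFG_CTRL_LSO2)
			return NFP_NET_CFG_CTRL_LSO2;
		return NFP_NET_CFG_CTRL_LSO;
	}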
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
index d04ccc9f6116..e5e94e0746ec 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
@@ -71,6 +71,10 @@
#define NFP_NET_META_FIELD_SIZE 4
#define NFP_NET_META_HASH 1 /* next field carries hash type */
#define NFP_NET_META_MARK 2
+#define NFP_NET_META_PORTID 5
+#define NFP_NET_META_CSUM 6 /* checksum complete type */
+
+#define NFP_META_PORT_ID_CTRL ~0U
/**
* Hash type pre-pended when a RSS hash was computed
@@ -119,9 +123,10 @@
#define NFP_NET_CFG_CTRL_TXVLAN (0x1 << 7) /* Enable VLAN insert */
#define NFP_NET_CFG_CTRL_SCATTER (0x1 << 8) /* Scatter DMA */
#define NFP_NET_CFG_CTRL_GATHER (0x1 << 9) /* Gather DMA */
-#define NFP_NET_CFG_CTRL_LSO (0x1 << 10) /* LSO/TSO */
+#define NFP_NET_CFG_CTRL_LSO (0x1 << 10) /* LSO/TSO (version 1) */
+#define NFP_NET_CFG_CTRL_CTAG_FILTER (0x1 << 11) /* VLAN CTAG filtering */
#define NFP_NET_CFG_CTRL_RINGCFG (0x1 << 16) /* Ring runtime changes */
-#define NFP_NET_CFG_CTRL_RSS (0x1 << 17) /* RSS */
+#define NFP_NET_CFG_CTRL_RSS (0x1 << 17) /* RSS (version 1) */
#define NFP_NET_CFG_CTRL_IRQMOD (0x1 << 18) /* Interrupt moderation */
#define NFP_NET_CFG_CTRL_RINGPRIO (0x1 << 19) /* Ring priorities */
#define NFP_NET_CFG_CTRL_MSIXAUTO (0x1 << 20) /* MSI-X auto-masking */
@@ -131,6 +136,20 @@
#define NFP_NET_CFG_CTRL_VXLAN (0x1 << 24) /* VXLAN tunnel support */
#define NFP_NET_CFG_CTRL_NVGRE (0x1 << 25) /* NVGRE tunnel support */
#define NFP_NET_CFG_CTRL_BPF (0x1 << 27) /* BPF offload capable */
+#define NFP_NET_CFG_CTRL_LSO2 (0x1 << 28) /* LSO/TSO (version 2) */
+#define NFP_NET_CFG_CTRL_RSS2 (0x1 << 29) /* RSS (version 2) */
+#define NFP_NET_CFG_CTRL_CSUM_COMPLETE (0x1 << 30) /* Checksum complete */
+#define NFP_NET_CFG_CTRL_LIVE_ADDR (0x1 << 31) /* live MAC addr change */
+
+#define NFP_NET_CFG_CTRL_LSO_ANY (NFP_NET_CFG_CTRL_LSO | \
+ NFP_NET_CFG_CTRL_LSO2)
+#define NFP_NET_CFG_CTRL_RSS_ANY (NFP_NET_CFG_CTRL_RSS | \
+ NFP_NET_CFG_CTRL_RSS2)
+#define NFP_NET_CFG_CTRL_RXCSUM_ANY (NFP_NET_CFG_CTRL_RXCSUM | \
+ NFP_NET_CFG_CTRL_CSUM_COMPLETE)
+#define NFP_NET_CFG_CTRL_CHAIN_META (NFP_NET_CFG_CTRL_RSS2 | \
+ NFP_NET_CFG_CTRL_CSUM_COMPLETE)
+
#define NFP_NET_CFG_UPDATE 0x0004
#define NFP_NET_CFG_UPDATE_GEN (0x1 << 0) /* General update */
#define NFP_NET_CFG_UPDATE_RING (0x1 << 1) /* Ring config change */
@@ -143,6 +162,8 @@
#define NFP_NET_CFG_UPDATE_IRQMOD (0x1 << 8) /* IRQ mod change */
#define NFP_NET_CFG_UPDATE_VXLAN (0x1 << 9) /* VXLAN port change */
#define NFP_NET_CFG_UPDATE_BPF (0x1 << 10) /* BPF program load */
+#define NFP_NET_CFG_UPDATE_MACADDR (0x1 << 11) /* MAC address change */
+#define NFP_NET_CFG_UPDATE_MBOX (0x1 << 12) /* Mailbox update */
#define NFP_NET_CFG_UPDATE_ERR (0x1 << 31) /* An error occurred */
#define NFP_NET_CFG_TXRS_ENABLE 0x0008
#define NFP_NET_CFG_RXRS_ENABLE 0x0010
@@ -381,4 +402,29 @@
#define NFP_NET_CFG_RXR_STATS(_x) (NFP_NET_CFG_RXR_STATS_BASE + \
((_x) * 0x10))
+/**
+ * General use mailbox area (0x1800 - 0x19ff)
+ * 4B used for the update command and 4B for the return code,
+ * followed by a max of 504B of variable length value
+ */
+#define NFP_NET_CFG_MBOX_CMD 0x1800
+#define NFP_NET_CFG_MBOX_RET 0x1804
+#define NFP_NET_CFG_MBOX_VAL 0x1808
+#define NFP_NET_CFG_MBOX_VAL_MAX_SZ 0x1F8
+
+#define NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_ADD 1
+#define NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_KILL 2
+
+/**
+ * VLAN filtering using general use mailbox
+ * @NFP_NET_CFG_VLAN_FILTER: Base address of VLAN filter mailbox
+ * @NFP_NET_CFG_VLAN_FILTER_VID: VLAN ID to filter
+ * @NFP_NET_CFG_VLAN_FILTER_PROTO: VLAN proto to filter
+ * @NFP_NET_CFG_VLAN_FILTER_SZ:	Size of the VLAN filter mailbox in bytes
+ */
+#define NFP_NET_CFG_VLAN_FILTER NFP_NET_CFG_MBOX_VAL
+#define NFP_NET_CFG_VLAN_FILTER_VID NFP_NET_CFG_VLAN_FILTER
+#define NFP_NET_CFG_VLAN_FILTER_PROTO (NFP_NET_CFG_VLAN_FILTER + 2)
+#define NFP_NET_CFG_VLAN_FILTER_SZ 0x0004
+
#endif /* _NFP_NET_CTRL_H_ */
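The mailbox defines above describe a simple request/response protocol: write the arguments into the value area, write the command word, kick the firmware with NFP_NET_CFG_UPDATE_MBOX, then read the 4B return code. A hedged sketch of a CTAG filter add built on these offsets (the nn_writew/nn_writel/nn_readl accessors and nfp_net_reconfig() come from the driver; the helper itself and its error convention are assumptions):

	static int nfp_vlan_filter_add(struct nfp_net *nn, u16 vid)
	{
		int err;

		/* Arguments go into the variable-length value area */
		nn_writew(nn, NFP_NET_CFG_VLAN_FILTER_VID, vid);
		nn_writew(nn, NFP_NET_CFG_VLAN_FILTER_PROTO, ETH_P_8021Q);

		/* Post the command and wait for the firmware to process it */
		nn_writel(nn, NFP_NET_CFG_MBOX_CMD,
			  NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_ADD);
		err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_MBOX);
		if (err)
			return err;

		/* Non-zero return word means the firmware rejected the request */
		return -nn_readl(nn, NFP_NET_CFG_MBOX_RET);
	}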
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c
index 4077c59bf782..40217ece5fcb 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c
@@ -54,7 +54,7 @@ static int nfp_net_debugfs_rx_q_read(struct seq_file *file, void *data)
goto out;
nn = r_vec->nfp_net;
rx_ring = r_vec->rx_ring;
- if (!netif_running(nn->dp.netdev))
+ if (!nfp_net_running(nn))
goto out;
rxd_cnt = rx_ring->cnt;
@@ -62,7 +62,7 @@ static int nfp_net_debugfs_rx_q_read(struct seq_file *file, void *data)
fl_rd_p = nfp_qcp_rd_ptr_read(rx_ring->qcp_fl);
fl_wr_p = nfp_qcp_wr_ptr_read(rx_ring->qcp_fl);
- seq_printf(file, "RX[%02d,%02d]: cnt=%d dma=%pad host=%p H_RD=%d H_WR=%d FL_RD=%d FL_WR=%d\n",
+ seq_printf(file, "RX[%02d,%02d]: cnt=%u dma=%pad host=%p H_RD=%u H_WR=%u FL_RD=%u FL_WR=%u\n",
rx_ring->idx, rx_ring->fl_qcidx,
rx_ring->cnt, &rx_ring->dma, rx_ring->rxds,
rx_ring->rd_p, rx_ring->wr_p, fl_rd_p, fl_wr_p);
@@ -138,7 +138,7 @@ static int nfp_net_debugfs_tx_q_read(struct seq_file *file, void *data)
if (!r_vec->nfp_net || !tx_ring)
goto out;
nn = r_vec->nfp_net;
- if (!netif_running(nn->dp.netdev))
+ if (!nfp_net_running(nn))
goto out;
txd_cnt = tx_ring->cnt;
@@ -146,7 +146,7 @@ static int nfp_net_debugfs_tx_q_read(struct seq_file *file, void *data)
d_rd_p = nfp_qcp_rd_ptr_read(tx_ring->qcp_q);
d_wr_p = nfp_qcp_wr_ptr_read(tx_ring->qcp_q);
- seq_printf(file, "TX[%02d,%02d%s]: cnt=%d dma=%pad host=%p H_RD=%d H_WR=%d D_RD=%d D_WR=%d\n",
+ seq_printf(file, "TX[%02d,%02d%s]: cnt=%u dma=%pad host=%p H_RD=%u H_WR=%u D_RD=%u D_WR=%u\n",
tx_ring->idx, tx_ring->qcidx,
tx_ring == r_vec->tx_ring ? "" : "xdp",
tx_ring->cnt, &tx_ring->dma, tx_ring->txds,
@@ -200,7 +200,7 @@ static const struct file_operations nfp_xdp_q_fops = {
.llseek = seq_lseek
};
-void nfp_net_debugfs_port_add(struct nfp_net *nn, struct dentry *ddir, int id)
+void nfp_net_debugfs_vnic_add(struct nfp_net *nn, struct dentry *ddir, int id)
{
struct dentry *queues, *tx, *rx, *xdp;
char name[20];
@@ -209,7 +209,10 @@ void nfp_net_debugfs_port_add(struct nfp_net *nn, struct dentry *ddir, int id)
if (IS_ERR_OR_NULL(nfp_dir))
return;
- sprintf(name, "port%d", id);
+ if (nfp_net_is_data_vnic(nn))
+ sprintf(name, "vnic%d", id);
+ else
+ strcpy(name, "ctrl-vnic");
nn->debugfs_dir = debugfs_create_dir(name, ddir);
if (IS_ERR_OR_NULL(nn->debugfs_dir))
return;
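The debugfs readers above switch from netif_running() to nfp_net_running() because a ctrl vNIC has no netdev to query. A plausible shape for that predicate, assuming it keys off the ENABLE control bit (illustrative; the real helper lives in nfp_net.h):

	static bool nfp_net_running(struct nfp_net *nn)
	{
		return nn->dp.ctrl & NFP_NET_CFG_CTRL_ENABLE;
	}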
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index abbb47e60cc3..6e31355c3567 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -50,8 +50,10 @@
#include "nfpcore/nfp.h"
#include "nfpcore/nfp_nsp.h"
+#include "nfp_app.h"
#include "nfp_net_ctrl.h"
#include "nfp_net.h"
+#include "nfp_port.h"
enum nfp_dump_diag {
NFP_DUMP_NSP_DIAG = 0,
@@ -134,14 +136,14 @@ static const struct _nfp_net_et_stats nfp_net_et_stats[] = {
#define NN_ET_STATS_LEN (NN_ET_GLOBAL_STATS_LEN + NN_ET_RVEC_GATHER_STATS + \
NN_ET_RVEC_STATS_LEN + NN_ET_QUEUE_STATS_LEN)
-static void nfp_net_get_nspinfo(struct nfp_net *nn, char *version)
+static void nfp_net_get_nspinfo(struct nfp_app *app, char *version)
{
struct nfp_nsp *nsp;
- if (!nn->cpp)
+ if (!app)
return;
- nsp = nfp_nsp_open(nn->cpp);
+ nsp = nfp_nsp_open(app->cpp);
if (IS_ERR(nsp))
return;
@@ -162,11 +164,12 @@ static void nfp_net_get_drvinfo(struct net_device *netdev,
sizeof(drvinfo->driver));
strlcpy(drvinfo->version, nfp_driver_version, sizeof(drvinfo->version));
- nfp_net_get_nspinfo(nn, nsp_version);
+ nfp_net_get_nspinfo(nn->app, nsp_version);
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
- "%d.%d.%d.%d %s",
+ "%d.%d.%d.%d %s %s %s",
nn->fw_ver.resv, nn->fw_ver.class,
- nn->fw_ver.major, nn->fw_ver.minor, nsp_version);
+ nn->fw_ver.major, nn->fw_ver.minor, nsp_version,
+ nfp_app_mip_name(nn->app), nfp_app_name(nn->app));
strlcpy(drvinfo->bus_info, pci_name(nn->pdev),
sizeof(drvinfo->bus_info));
@@ -195,37 +198,38 @@ nfp_net_get_link_ksettings(struct net_device *netdev,
[NFP_NET_CFG_STS_LINK_RATE_50G] = SPEED_50000,
[NFP_NET_CFG_STS_LINK_RATE_100G] = SPEED_100000,
};
- struct nfp_net *nn = netdev_priv(netdev);
+ struct nfp_eth_table_port *eth_port;
+ struct nfp_port *port;
+ struct nfp_net *nn;
u32 sts, ls;
+ /* Init to unknowns */
ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
cmd->base.port = PORT_OTHER;
cmd->base.speed = SPEED_UNKNOWN;
cmd->base.duplex = DUPLEX_UNKNOWN;
- if (nn->eth_port)
- cmd->base.autoneg = nn->eth_port->aneg != NFP_ANEG_DISABLED ?
+ port = nfp_port_from_netdev(netdev);
+ eth_port = nfp_port_get_eth_port(port);
+ if (eth_port)
+ cmd->base.autoneg = eth_port->aneg != NFP_ANEG_DISABLED ?
AUTONEG_ENABLE : AUTONEG_DISABLE;
if (!netif_carrier_ok(netdev))
return 0;
/* Use link speed from ETH table if available, otherwise try the BAR */
- if (nn->eth_port) {
- int err;
-
- if (nfp_net_link_changed_read_clear(nn)) {
- err = nfp_net_refresh_eth_port(nn);
- if (err)
- return err;
- }
-
- cmd->base.port = nn->eth_port->port_type;
- cmd->base.speed = nn->eth_port->speed;
+ if (eth_port) {
+ cmd->base.port = eth_port->port_type;
+ cmd->base.speed = eth_port->speed;
cmd->base.duplex = DUPLEX_FULL;
return 0;
}
+ if (!nfp_netdev_is_nfp_net(netdev))
+ return -EOPNOTSUPP;
+ nn = netdev_priv(netdev);
+
sts = nn_readl(nn, NFP_NET_CFG_STS);
ls = FIELD_GET(NFP_NET_CFG_STS_LINK_RATE, sts);
@@ -246,19 +250,22 @@ static int
nfp_net_set_link_ksettings(struct net_device *netdev,
const struct ethtool_link_ksettings *cmd)
{
- struct nfp_net *nn = netdev_priv(netdev);
+ struct nfp_eth_table_port *eth_port;
+ struct nfp_port *port;
struct nfp_nsp *nsp;
int err;
- if (!nn->eth_port)
+ port = nfp_port_from_netdev(netdev);
+ eth_port = __nfp_port_get_eth_port(port);
+ if (!eth_port)
return -EOPNOTSUPP;
if (netif_running(netdev)) {
- nn_warn(nn, "Changing settings not allowed on an active interface. It may cause the port to be disabled until reboot.\n");
+ netdev_warn(netdev, "Changing settings not allowed on an active interface. It may cause the port to be disabled until reboot.\n");
return -EBUSY;
}
- nsp = nfp_eth_config_start(nn->cpp, nn->eth_port->index);
+ nsp = nfp_eth_config_start(port->app->cpp, eth_port->index);
if (IS_ERR(nsp))
return PTR_ERR(nsp);
@@ -267,7 +274,7 @@ nfp_net_set_link_ksettings(struct net_device *netdev,
if (err)
goto err_bad_set;
if (cmd->base.speed != SPEED_UNKNOWN) {
- u32 speed = cmd->base.speed / nn->eth_port->lanes;
+ u32 speed = cmd->base.speed / eth_port->lanes;
err = __nfp_eth_set_speed(nsp, speed);
if (err)
@@ -278,7 +285,7 @@ nfp_net_set_link_ksettings(struct net_device *netdev,
if (err > 0)
return 0; /* no change */
- nfp_net_refresh_port_table(nn);
+ nfp_net_refresh_port_table(port);
return err;
@@ -496,7 +503,7 @@ static int nfp_net_get_rss_hash_opts(struct nfp_net *nn,
cmd->data = 0;
- if (!(nn->cap & NFP_NET_CFG_CTRL_RSS))
+ if (!(nn->cap & NFP_NET_CFG_CTRL_RSS_ANY))
return -EOPNOTSUPP;
nfp_rss_flag = ethtool_flow_to_nfp_flag(cmd->flow_type);
@@ -533,7 +540,7 @@ static int nfp_net_set_rss_hash_opt(struct nfp_net *nn,
u32 nfp_rss_flag;
int err;
- if (!(nn->cap & NFP_NET_CFG_CTRL_RSS))
+ if (!(nn->cap & NFP_NET_CFG_CTRL_RSS_ANY))
return -EOPNOTSUPP;
/* RSS only supports IP SA/DA and L4 src/dst ports */
@@ -595,7 +602,7 @@ static u32 nfp_net_get_rxfh_indir_size(struct net_device *netdev)
{
struct nfp_net *nn = netdev_priv(netdev);
- if (!(nn->cap & NFP_NET_CFG_CTRL_RSS))
+ if (!(nn->cap & NFP_NET_CFG_CTRL_RSS_ANY))
return 0;
return ARRAY_SIZE(nn->rss_itbl);
@@ -605,7 +612,7 @@ static u32 nfp_net_get_rxfh_key_size(struct net_device *netdev)
{
struct nfp_net *nn = netdev_priv(netdev);
- if (!(nn->cap & NFP_NET_CFG_CTRL_RSS))
+ if (!(nn->cap & NFP_NET_CFG_CTRL_RSS_ANY))
return -EOPNOTSUPP;
return nfp_net_rss_key_sz(nn);
@@ -617,7 +624,7 @@ static int nfp_net_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
struct nfp_net *nn = netdev_priv(netdev);
int i;
- if (!(nn->cap & NFP_NET_CFG_CTRL_RSS))
+ if (!(nn->cap & NFP_NET_CFG_CTRL_RSS_ANY))
return -EOPNOTSUPP;
if (indir)
@@ -641,7 +648,7 @@ static int nfp_net_set_rxfh(struct net_device *netdev,
struct nfp_net *nn = netdev_priv(netdev);
int i;
- if (!(nn->cap & NFP_NET_CFG_CTRL_RSS) ||
+ if (!(nn->cap & NFP_NET_CFG_CTRL_RSS_ANY) ||
!(hfunc == ETH_RSS_HASH_NO_CHANGE || hfunc == nn->rss_hfunc))
return -EOPNOTSUPP;
@@ -706,13 +713,13 @@ nfp_dump_nsp_diag(struct nfp_net *nn, struct ethtool_dump *dump, void *buffer)
struct nfp_resource *res;
int ret;
- if (!nn->cpp)
+ if (!nn->app)
return -EOPNOTSUPP;
dump->version = 1;
dump->flag = NFP_DUMP_NSP_DIAG;
- res = nfp_resource_acquire(nn->cpp, NFP_RESOURCE_NSP_DIAG);
+ res = nfp_resource_acquire(nn->app->cpp, NFP_RESOURCE_NSP_DIAG);
if (IS_ERR(res))
return PTR_ERR(res);
@@ -722,7 +729,7 @@ nfp_dump_nsp_diag(struct nfp_net *nn, struct ethtool_dump *dump, void *buffer)
goto exit_release;
}
- ret = nfp_cpp_read(nn->cpp, nfp_resource_cpp_id(res),
+ ret = nfp_cpp_read(nn->app->cpp, nfp_resource_cpp_id(res),
nfp_resource_address(res),
buffer, dump->len);
if (ret != dump->len)
@@ -743,7 +750,7 @@ static int nfp_net_set_dump(struct net_device *netdev, struct ethtool_dump *val)
{
struct nfp_net *nn = netdev_priv(netdev);
- if (!nn->cpp)
+ if (!nn->app)
return -EOPNOTSUPP;
if (val->flag != NFP_DUMP_NSP_DIAG)
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
index 8cb87cbe1120..5797dbf2b507 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
@@ -43,6 +43,7 @@
#include <linux/etherdevice.h>
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/lockdep.h>
#include <linux/pci.h>
#include <linux/pci_regs.h>
#include <linux/msi.h>
@@ -54,20 +55,21 @@
#include "nfpcore/nfp_nffw.h"
#include "nfpcore/nfp_nsp.h"
#include "nfpcore/nfp6000_pcie.h"
-
+#include "nfp_app.h"
#include "nfp_net_ctrl.h"
#include "nfp_net.h"
#include "nfp_main.h"
+#include "nfp_port.h"
#define NFP_PF_CSR_SLICE_SIZE (32 * 1024)
-static int nfp_is_ready(struct nfp_cpp *cpp)
+static int nfp_is_ready(struct nfp_pf *pf)
{
const char *cp;
long state;
int err;
- cp = nfp_hwinfo_lookup(cpp, "board.state");
+ cp = nfp_hwinfo_lookup(pf->hwinfo, "board.state");
if (!cp)
return 0;
@@ -79,375 +81,277 @@ static int nfp_is_ready(struct nfp_cpp *cpp)
}
/**
- * nfp_net_map_area() - Help function to map an area
- * @cpp: NFP CPP handler
- * @name: Name for the area
- * @target: CPP target
- * @addr: CPP address
- * @size: Size of the area
- * @area: Area handle (returned).
- *
- * This function is primarily to simplify the code in the main probe
- * function. To undo the effect of this functions call
- * @nfp_cpp_area_release_free(*area);
- *
- * Return: Pointer to memory mapped area or ERR_PTR
- */
-static u8 __iomem *nfp_net_map_area(struct nfp_cpp *cpp,
- const char *name, int isl, int target,
- unsigned long long addr, unsigned long size,
- struct nfp_cpp_area **area)
-{
- u8 __iomem *res;
- u32 dest;
- int err;
-
- dest = NFP_CPP_ISLAND_ID(target, NFP_CPP_ACTION_RW, 0, isl);
-
- *area = nfp_cpp_area_alloc_with_name(cpp, dest, name, addr, size);
- if (!*area) {
- err = -EIO;
- goto err_area;
- }
-
- err = nfp_cpp_area_acquire(*area);
- if (err < 0)
- goto err_acquire;
-
- res = nfp_cpp_area_iomem(*area);
- if (!res) {
- err = -EIO;
- goto err_map;
- }
-
- return res;
-
-err_map:
- nfp_cpp_area_release(*area);
-err_acquire:
- nfp_cpp_area_free(*area);
-err_area:
- return (u8 __iomem *)ERR_PTR(err);
-}
-
-/**
* nfp_net_get_mac_addr() - Get the MAC address.
- * @nn: NFP Network structure
- * @cpp: NFP CPP handle
- * @id: NFP port id
+ * @pf: NFP PF handle
+ * @port: NFP port structure
*
* First try to get the MAC address from NSP ETH table. If that
- * fails try HWInfo. As a last resort generate a random address.
+ * fails, generate a random address.
*/
-static void
-nfp_net_get_mac_addr(struct nfp_net *nn, struct nfp_cpp *cpp, unsigned int id)
+void nfp_net_get_mac_addr(struct nfp_pf *pf, struct nfp_port *port)
{
- struct nfp_net_dp *dp = &nn->dp;
- u8 mac_addr[ETH_ALEN];
- const char *mac_str;
- char name[32];
-
- if (nn->eth_port) {
- ether_addr_copy(dp->netdev->dev_addr, nn->eth_port->mac_addr);
- ether_addr_copy(dp->netdev->perm_addr, nn->eth_port->mac_addr);
- return;
- }
-
- snprintf(name, sizeof(name), "eth%d.mac", id);
-
- mac_str = nfp_hwinfo_lookup(cpp, name);
- if (!mac_str) {
- dev_warn(dp->dev, "Can't lookup MAC address. Generate\n");
- eth_hw_addr_random(dp->netdev);
- return;
- }
+ struct nfp_eth_table_port *eth_port;
- if (sscanf(mac_str, "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx",
- &mac_addr[0], &mac_addr[1], &mac_addr[2],
- &mac_addr[3], &mac_addr[4], &mac_addr[5]) != 6) {
- dev_warn(dp->dev,
- "Can't parse MAC address (%s). Generate.\n", mac_str);
- eth_hw_addr_random(dp->netdev);
+ eth_port = __nfp_port_get_eth_port(port);
+ if (!eth_port) {
+ eth_hw_addr_random(port->netdev);
return;
}
- ether_addr_copy(dp->netdev->dev_addr, mac_addr);
- ether_addr_copy(dp->netdev->perm_addr, mac_addr);
+ ether_addr_copy(port->netdev->dev_addr, eth_port->mac_addr);
+ ether_addr_copy(port->netdev->perm_addr, eth_port->mac_addr);
}
static struct nfp_eth_table_port *
-nfp_net_find_port(struct nfp_eth_table *eth_tbl, unsigned int id)
+nfp_net_find_port(struct nfp_eth_table *eth_tbl, unsigned int index)
{
int i;
for (i = 0; eth_tbl && i < eth_tbl->count; i++)
- if (eth_tbl->ports[i].eth_index == id)
+ if (eth_tbl->ports[i].index == index)
return &eth_tbl->ports[i];
return NULL;
}
-static unsigned int nfp_net_pf_get_num_ports(struct nfp_pf *pf)
+static int
+nfp_net_pf_rtsym_read_optional(struct nfp_pf *pf, const char *format,
+ unsigned int default_val)
{
char name[256];
- u16 interface;
- int pcie_pf;
int err = 0;
u64 val;
- interface = nfp_cpp_interface(pf->cpp);
- pcie_pf = NFP_CPP_INTERFACE_UNIT_of(interface);
-
- snprintf(name, sizeof(name), "nfd_cfg_pf%d_num_ports", pcie_pf);
+ snprintf(name, sizeof(name), format, nfp_cppcore_pcie_unit(pf->cpp));
- val = nfp_rtsym_read_le(pf->cpp, name, &err);
- /* Default to one port */
+ val = nfp_rtsym_read_le(pf->rtbl, name, &err);
if (err) {
- if (err != -ENOENT)
- nfp_err(pf->cpp, "Unable to read adapter port count\n");
- val = 1;
+ if (err == -ENOENT)
+ return default_val;
+ nfp_err(pf->cpp, "Unable to read symbol %s\n", name);
+ return err;
}
return val;
}
-static unsigned int
-nfp_net_pf_total_qcs(struct nfp_pf *pf, void __iomem *ctrl_bar,
- unsigned int stride, u32 start_off, u32 num_off)
+static int nfp_net_pf_get_num_ports(struct nfp_pf *pf)
{
- unsigned int i, min_qc, max_qc;
-
- min_qc = readl(ctrl_bar + start_off);
- max_qc = min_qc;
-
- for (i = 0; i < pf->num_ports; i++) {
- /* To make our lives simpler only accept configuration where
- * queues are allocated to PFs in order (queues of PFn all have
- * indexes lower than PFn+1).
- */
- if (max_qc > readl(ctrl_bar + start_off))
- return 0;
-
- max_qc = readl(ctrl_bar + start_off);
- max_qc += readl(ctrl_bar + num_off) * stride;
- ctrl_bar += NFP_PF_CSR_SLICE_SIZE;
- }
+ return nfp_net_pf_rtsym_read_optional(pf, "nfd_cfg_pf%u_num_ports", 1);
+}
- return max_qc - min_qc;
+static int nfp_net_pf_get_app_id(struct nfp_pf *pf)
+{
+ return nfp_net_pf_rtsym_read_optional(pf, "_pf%u_net_app_id",
+ NFP_APP_CORE_NIC);
}
-static u8 __iomem *nfp_net_pf_map_ctrl_bar(struct nfp_pf *pf)
+static u8 __iomem *
+nfp_net_pf_map_rtsym(struct nfp_pf *pf, const char *name, const char *sym_fmt,
+ unsigned int min_size, struct nfp_cpp_area **area)
{
- const struct nfp_rtsym *ctrl_sym;
- u8 __iomem *ctrl_bar;
char pf_symbol[256];
- u16 interface;
- int pcie_pf;
-
- interface = nfp_cpp_interface(pf->cpp);
- pcie_pf = NFP_CPP_INTERFACE_UNIT_of(interface);
-
- snprintf(pf_symbol, sizeof(pf_symbol), "_pf%d_net_bar0", pcie_pf);
-
- ctrl_sym = nfp_rtsym_lookup(pf->cpp, pf_symbol);
- if (!ctrl_sym) {
- dev_err(&pf->pdev->dev,
- "Failed to find PF BAR0 symbol %s\n", pf_symbol);
- return NULL;
- }
-
- if (ctrl_sym->size < pf->num_ports * NFP_PF_CSR_SLICE_SIZE) {
- dev_err(&pf->pdev->dev,
- "PF BAR0 too small to contain %d ports\n",
- pf->num_ports);
- return NULL;
- }
- ctrl_bar = nfp_net_map_area(pf->cpp, "net.ctrl",
- ctrl_sym->domain, ctrl_sym->target,
- ctrl_sym->addr, ctrl_sym->size,
- &pf->ctrl_area);
- if (IS_ERR(ctrl_bar)) {
- dev_err(&pf->pdev->dev, "Failed to map PF BAR0: %ld\n",
- PTR_ERR(ctrl_bar));
- return NULL;
- }
+ snprintf(pf_symbol, sizeof(pf_symbol), sym_fmt,
+ nfp_cppcore_pcie_unit(pf->cpp));
- return ctrl_bar;
+ return nfp_rtsym_map(pf->rtbl, pf_symbol, name, min_size, area);
}
-static void nfp_net_pf_free_netdevs(struct nfp_pf *pf)
+static void nfp_net_pf_free_vnic(struct nfp_pf *pf, struct nfp_net *nn)
{
- struct nfp_net *nn;
+ nfp_port_free(nn->port);
+ list_del(&nn->vnic_list);
+ pf->num_vnics--;
+ nfp_net_free(nn);
+}
- while (!list_empty(&pf->ports)) {
- nn = list_first_entry(&pf->ports, struct nfp_net, port_list);
- list_del(&nn->port_list);
- pf->num_netdevs--;
+static void nfp_net_pf_free_vnics(struct nfp_pf *pf)
+{
+ struct nfp_net *nn, *next;
- nfp_net_netdev_free(nn);
- }
+ list_for_each_entry_safe(nn, next, &pf->vnics, vnic_list)
+ if (nfp_net_is_data_vnic(nn))
+ nfp_net_pf_free_vnic(pf, nn);
}
static struct nfp_net *
-nfp_net_pf_alloc_port_netdev(struct nfp_pf *pf, void __iomem *ctrl_bar,
- void __iomem *tx_bar, void __iomem *rx_bar,
- int stride, struct nfp_net_fw_version *fw_ver,
- struct nfp_eth_table_port *eth_port)
+nfp_net_pf_alloc_vnic(struct nfp_pf *pf, bool needs_netdev,
+ void __iomem *ctrl_bar, void __iomem *qc_bar,
+ int stride, unsigned int id)
{
- u32 n_tx_rings, n_rx_rings;
+ u32 tx_base, rx_base, n_tx_rings, n_rx_rings;
struct nfp_net *nn;
+ int err;
+ tx_base = readl(ctrl_bar + NFP_NET_CFG_START_TXQ);
+ rx_base = readl(ctrl_bar + NFP_NET_CFG_START_RXQ);
n_tx_rings = readl(ctrl_bar + NFP_NET_CFG_MAX_TXRINGS);
n_rx_rings = readl(ctrl_bar + NFP_NET_CFG_MAX_RXRINGS);
- /* Allocate and initialise the netdev */
- nn = nfp_net_netdev_alloc(pf->pdev, n_tx_rings, n_rx_rings);
+ /* Allocate and initialise the vNIC */
+ nn = nfp_net_alloc(pf->pdev, needs_netdev, n_tx_rings, n_rx_rings);
if (IS_ERR(nn))
return nn;
- nn->cpp = pf->cpp;
- nn->fw_ver = *fw_ver;
+ nn->app = pf->app;
+ nfp_net_get_fw_version(&nn->fw_ver, ctrl_bar);
nn->dp.ctrl_bar = ctrl_bar;
- nn->tx_bar = tx_bar;
- nn->rx_bar = rx_bar;
+ nn->tx_bar = qc_bar + tx_base * NFP_QCP_QUEUE_ADDR_SZ;
+ nn->rx_bar = qc_bar + rx_base * NFP_QCP_QUEUE_ADDR_SZ;
nn->dp.is_vf = 0;
nn->stride_rx = stride;
nn->stride_tx = stride;
- nn->eth_port = eth_port;
+
+ if (needs_netdev) {
+ err = nfp_app_vnic_init(pf->app, nn, id);
+ if (err) {
+ nfp_net_free(nn);
+ return ERR_PTR(err);
+ }
+ }
+
+ pf->num_vnics++;
+ list_add_tail(&nn->vnic_list, &pf->vnics);
return nn;
}
static int
-nfp_net_pf_init_port_netdev(struct nfp_pf *pf, struct nfp_net *nn,
- unsigned int id)
+nfp_net_pf_init_vnic(struct nfp_pf *pf, struct nfp_net *nn, unsigned int id)
{
int err;
- /* Get MAC address */
- nfp_net_get_mac_addr(nn, pf->cpp, id);
-
/* Get ME clock frequency from ctrl BAR
* XXX for now frequency is hardcoded until we figure out how
* to get the value from nfp-hwinfo into ctrl bar
*/
nn->me_freq_mhz = 1200;
- err = nfp_net_netdev_init(nn->dp.netdev);
+ err = nfp_net_init(nn);
if (err)
return err;
- nfp_net_debugfs_port_add(nn, pf->ddir, id);
+ nfp_net_debugfs_vnic_add(nn, pf->ddir, id);
+
+ if (nn->port) {
+ err = nfp_devlink_port_register(pf->app, nn->port);
+ if (err)
+ goto err_dfs_clean;
+ }
nfp_net_info(nn);
return 0;
+
+err_dfs_clean:
+ nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
+ nfp_net_clean(nn);
+ return err;
}
static int
-nfp_net_pf_alloc_netdevs(struct nfp_pf *pf, void __iomem *ctrl_bar,
- void __iomem *tx_bar, void __iomem *rx_bar,
- int stride, struct nfp_net_fw_version *fw_ver)
+nfp_net_pf_alloc_vnics(struct nfp_pf *pf, void __iomem *ctrl_bar,
+ void __iomem *qc_bar, int stride)
{
- u32 prev_tx_base, prev_rx_base, tgt_tx_base, tgt_rx_base;
- struct nfp_eth_table_port *eth_port;
struct nfp_net *nn;
unsigned int i;
int err;
- prev_tx_base = readl(ctrl_bar + NFP_NET_CFG_START_TXQ);
- prev_rx_base = readl(ctrl_bar + NFP_NET_CFG_START_RXQ);
-
- for (i = 0; i < pf->num_ports; i++) {
- tgt_tx_base = readl(ctrl_bar + NFP_NET_CFG_START_TXQ);
- tgt_rx_base = readl(ctrl_bar + NFP_NET_CFG_START_RXQ);
- tx_bar += (tgt_tx_base - prev_tx_base) * NFP_QCP_QUEUE_ADDR_SZ;
- rx_bar += (tgt_rx_base - prev_rx_base) * NFP_QCP_QUEUE_ADDR_SZ;
- prev_tx_base = tgt_tx_base;
- prev_rx_base = tgt_rx_base;
-
- eth_port = nfp_net_find_port(pf->eth_tbl, i);
- if (eth_port && eth_port->override_changed) {
- nfp_warn(pf->cpp, "Config changed for port #%d, reboot required before port will be operational\n", i);
- } else {
- nn = nfp_net_pf_alloc_port_netdev(pf, ctrl_bar, tx_bar,
- rx_bar, stride,
- fw_ver, eth_port);
- if (IS_ERR(nn)) {
- err = PTR_ERR(nn);
- goto err_free_prev;
- }
- list_add_tail(&nn->port_list, &pf->ports);
- pf->num_netdevs++;
+ for (i = 0; i < pf->max_data_vnics; i++) {
+ nn = nfp_net_pf_alloc_vnic(pf, true, ctrl_bar, qc_bar,
+ stride, i);
+ if (IS_ERR(nn)) {
+ err = PTR_ERR(nn);
+ goto err_free_prev;
}
ctrl_bar += NFP_PF_CSR_SLICE_SIZE;
+
+ /* Kill the vNIC if app init marked it as invalid */
+ if (nn->port && nn->port->type == NFP_PORT_INVALID) {
+ nfp_net_pf_free_vnic(pf, nn);
+ continue;
+ }
}
- if (list_empty(&pf->ports))
+ if (list_empty(&pf->vnics))
return -ENODEV;
return 0;
err_free_prev:
- nfp_net_pf_free_netdevs(pf);
+ nfp_net_pf_free_vnics(pf);
return err;
}
-static int
-nfp_net_pf_spawn_netdevs(struct nfp_pf *pf,
- void __iomem *ctrl_bar, void __iomem *tx_bar,
- void __iomem *rx_bar, int stride,
- struct nfp_net_fw_version *fw_ver)
+static void nfp_net_pf_clean_vnic(struct nfp_pf *pf, struct nfp_net *nn)
{
- unsigned int id, wanted_irqs, num_irqs, ports_left, irqs_left;
- struct nfp_net *nn;
- int err;
+ if (nn->port)
+ nfp_devlink_port_unregister(nn->port);
+ nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
+ nfp_net_clean(nn);
+ nfp_app_vnic_clean(pf->app, nn);
+}
- /* Allocate the netdevs and do basic init */
- err = nfp_net_pf_alloc_netdevs(pf, ctrl_bar, tx_bar, rx_bar,
- stride, fw_ver);
- if (err)
- return err;
+static int nfp_net_pf_alloc_irqs(struct nfp_pf *pf)
+{
+ unsigned int wanted_irqs, num_irqs, vnics_left, irqs_left;
+ struct nfp_net *nn;
/* Get MSI-X vectors */
wanted_irqs = 0;
- list_for_each_entry(nn, &pf->ports, port_list)
+ list_for_each_entry(nn, &pf->vnics, vnic_list)
wanted_irqs += NFP_NET_NON_Q_VECTORS + nn->dp.num_r_vecs;
pf->irq_entries = kcalloc(wanted_irqs, sizeof(*pf->irq_entries),
GFP_KERNEL);
- if (!pf->irq_entries) {
- err = -ENOMEM;
- goto err_nn_free;
- }
+ if (!pf->irq_entries)
+ return -ENOMEM;
num_irqs = nfp_net_irqs_alloc(pf->pdev, pf->irq_entries,
- NFP_NET_MIN_PORT_IRQS * pf->num_netdevs,
+ NFP_NET_MIN_VNIC_IRQS * pf->num_vnics,
wanted_irqs);
if (!num_irqs) {
- nn_warn(nn, "Unable to allocate MSI-X Vectors. Exiting\n");
- err = -ENOMEM;
- goto err_vec_free;
+ nfp_warn(pf->cpp, "Unable to allocate MSI-X vectors\n");
+ kfree(pf->irq_entries);
+ return -ENOMEM;
}
- /* Distribute IRQs to ports */
+ /* Distribute IRQs to vNICs */
irqs_left = num_irqs;
- ports_left = pf->num_netdevs;
- list_for_each_entry(nn, &pf->ports, port_list) {
+ vnics_left = pf->num_vnics;
+ list_for_each_entry(nn, &pf->vnics, vnic_list) {
unsigned int n;
- n = DIV_ROUND_UP(irqs_left, ports_left);
+ n = min(NFP_NET_NON_Q_VECTORS + nn->dp.num_r_vecs,
+ DIV_ROUND_UP(irqs_left, vnics_left));
nfp_net_irqs_assign(nn, &pf->irq_entries[num_irqs - irqs_left],
n);
irqs_left -= n;
- ports_left--;
+ vnics_left--;
}
- /* Finish netdev init and register */
+ return 0;
+}
+
+static void nfp_net_pf_free_irqs(struct nfp_pf *pf)
+{
+ nfp_net_irqs_disable(pf->pdev);
+ kfree(pf->irq_entries);
+}
+
+static int nfp_net_pf_init_vnics(struct nfp_pf *pf)
+{
+ struct nfp_net *nn;
+ unsigned int id;
+ int err;
+
+ /* Finish vNIC init and register */
id = 0;
- list_for_each_entry(nn, &pf->ports, port_list) {
- err = nfp_net_pf_init_port_netdev(pf, nn, id);
+ list_for_each_entry(nn, &pf->vnics, vnic_list) {
+ if (!nfp_net_is_data_vnic(nn))
+ continue;
+ err = nfp_net_pf_init_vnic(pf, nn, id);
if (err)
goto err_prev_deinit;
@@ -457,118 +361,326 @@ nfp_net_pf_spawn_netdevs(struct nfp_pf *pf,
return 0;
err_prev_deinit:
- list_for_each_entry_continue_reverse(nn, &pf->ports, port_list) {
- nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
- nfp_net_netdev_clean(nn->dp.netdev);
+ list_for_each_entry_continue_reverse(nn, &pf->vnics, vnic_list)
+ if (nfp_net_is_data_vnic(nn))
+ nfp_net_pf_clean_vnic(pf, nn);
+ return err;
+}
+
+static int
+nfp_net_pf_app_init(struct nfp_pf *pf, u8 __iomem *qc_bar, unsigned int stride)
+{
+ u8 __iomem *ctrl_bar;
+ int err;
+
+ pf->app = nfp_app_alloc(pf, nfp_net_pf_get_app_id(pf));
+ if (IS_ERR(pf->app))
+ return PTR_ERR(pf->app);
+
+ err = nfp_app_init(pf->app);
+ if (err)
+ goto err_free;
+
+ if (!nfp_app_needs_ctrl_vnic(pf->app))
+ return 0;
+
+ ctrl_bar = nfp_net_pf_map_rtsym(pf, "net.ctrl", "_pf%u_net_ctrl_bar",
+ NFP_PF_CSR_SLICE_SIZE,
+ &pf->ctrl_vnic_bar);
+ if (IS_ERR(ctrl_bar)) {
+ nfp_err(pf->cpp, "Failed to find data vNIC memory symbol\n");
+ err = PTR_ERR(ctrl_bar);
+ goto err_app_clean;
}
- nfp_net_irqs_disable(pf->pdev);
-err_vec_free:
- kfree(pf->irq_entries);
-err_nn_free:
- nfp_net_pf_free_netdevs(pf);
+
+ pf->ctrl_vnic = nfp_net_pf_alloc_vnic(pf, false, ctrl_bar, qc_bar,
+ stride, 0);
+ if (IS_ERR(pf->ctrl_vnic)) {
+ err = PTR_ERR(pf->ctrl_vnic);
+ goto err_unmap;
+ }
+
+ return 0;
+
+err_unmap:
+ nfp_cpp_area_release_free(pf->ctrl_vnic_bar);
+err_app_clean:
+ nfp_app_clean(pf->app);
+err_free:
+ nfp_app_free(pf->app);
+ pf->app = NULL;
+ return err;
+}
+
+static void nfp_net_pf_app_clean(struct nfp_pf *pf)
+{
+ if (pf->ctrl_vnic) {
+ nfp_net_pf_free_vnic(pf, pf->ctrl_vnic);
+ nfp_cpp_area_release_free(pf->ctrl_vnic_bar);
+ }
+ nfp_app_clean(pf->app);
+ nfp_app_free(pf->app);
+ pf->app = NULL;
+}
+
+static int nfp_net_pf_app_start_ctrl(struct nfp_pf *pf)
+{
+ int err;
+
+ if (!pf->ctrl_vnic)
+ return 0;
+ err = nfp_net_pf_init_vnic(pf, pf->ctrl_vnic, 0);
+ if (err)
+ return err;
+
+ err = nfp_ctrl_open(pf->ctrl_vnic);
+ if (err)
+ goto err_clean_ctrl;
+
+ return 0;
+
+err_clean_ctrl:
+ nfp_net_pf_clean_vnic(pf, pf->ctrl_vnic);
+ return err;
+}
+
+static void nfp_net_pf_app_stop_ctrl(struct nfp_pf *pf)
+{
+ if (!pf->ctrl_vnic)
+ return;
+ nfp_ctrl_close(pf->ctrl_vnic);
+ nfp_net_pf_clean_vnic(pf, pf->ctrl_vnic);
+}
+
+static int nfp_net_pf_app_start(struct nfp_pf *pf)
+{
+ int err;
+
+ err = nfp_net_pf_app_start_ctrl(pf);
+ if (err)
+ return err;
+
+ err = nfp_app_start(pf->app, pf->ctrl_vnic);
+ if (err)
+ goto err_ctrl_stop;
+
+ if (pf->num_vfs) {
+ err = nfp_app_sriov_enable(pf->app, pf->num_vfs);
+ if (err)
+ goto err_app_stop;
+ }
+
+ return 0;
+
+err_app_stop:
+ nfp_app_stop(pf->app);
+err_ctrl_stop:
+ nfp_net_pf_app_stop_ctrl(pf);
+ return err;
+}
+
+static void nfp_net_pf_app_stop(struct nfp_pf *pf)
+{
+ if (pf->num_vfs)
+ nfp_app_sriov_disable(pf->app);
+ nfp_app_stop(pf->app);
+ nfp_net_pf_app_stop_ctrl(pf);
+}
+
+static void nfp_net_pci_unmap_mem(struct nfp_pf *pf)
+{
+ if (pf->vf_cfg_bar)
+ nfp_cpp_area_release_free(pf->vf_cfg_bar);
+ if (pf->mac_stats_bar)
+ nfp_cpp_area_release_free(pf->mac_stats_bar);
+ nfp_cpp_area_release_free(pf->qc_area);
+ nfp_cpp_area_release_free(pf->data_vnic_bar);
+}
+
+static int nfp_net_pci_map_mem(struct nfp_pf *pf)
+{
+ u8 __iomem *mem;
+ u32 min_size;
+ int err;
+
+ min_size = pf->max_data_vnics * NFP_PF_CSR_SLICE_SIZE;
+ mem = nfp_net_pf_map_rtsym(pf, "net.ctrl", "_pf%d_net_bar0",
+ min_size, &pf->data_vnic_bar);
+ if (IS_ERR(mem)) {
+ nfp_err(pf->cpp, "Failed to find data vNIC memory symbol\n");
+ return PTR_ERR(mem);
+ }
+
+ min_size = NFP_MAC_STATS_SIZE * (pf->eth_tbl->max_index + 1);
+ pf->mac_stats_mem = nfp_rtsym_map(pf->rtbl, "_mac_stats",
+ "net.macstats", min_size,
+ &pf->mac_stats_bar);
+ if (IS_ERR(pf->mac_stats_mem)) {
+ if (PTR_ERR(pf->mac_stats_mem) != -ENOENT) {
+ err = PTR_ERR(pf->mac_stats_mem);
+ goto err_unmap_ctrl;
+ }
+ pf->mac_stats_mem = NULL;
+ }
+
+ pf->vf_cfg_mem = nfp_net_pf_map_rtsym(pf, "net.vfcfg",
+ "_pf%d_net_vf_bar",
+ NFP_NET_CFG_BAR_SZ *
+ pf->limit_vfs, &pf->vf_cfg_bar);
+ if (IS_ERR(pf->vf_cfg_mem)) {
+ if (PTR_ERR(pf->vf_cfg_mem) != -ENOENT) {
+ err = PTR_ERR(pf->vf_cfg_mem);
+ goto err_unmap_mac_stats;
+ }
+ pf->vf_cfg_mem = NULL;
+ }
+
+ mem = nfp_cpp_map_area(pf->cpp, "net.qc", 0, 0,
+ NFP_PCIE_QUEUE(0), NFP_QCP_QUEUE_AREA_SZ,
+ &pf->qc_area);
+ if (IS_ERR(mem)) {
+ nfp_err(pf->cpp, "Failed to map Queue Controller area.\n");
+ err = PTR_ERR(mem);
+ goto err_unmap_vf_cfg;
+ }
+
+ return 0;
+
+err_unmap_vf_cfg:
+ if (pf->vf_cfg_bar)
+ nfp_cpp_area_release_free(pf->vf_cfg_bar);
+err_unmap_mac_stats:
+ if (pf->mac_stats_bar)
+ nfp_cpp_area_release_free(pf->mac_stats_bar);
+err_unmap_ctrl:
+ nfp_cpp_area_release_free(pf->data_vnic_bar);
return err;
}
static void nfp_net_pci_remove_finish(struct nfp_pf *pf)
{
+ nfp_net_pf_app_stop(pf);
+ /* stop app first, to avoid double free of ctrl vNIC's ddir */
nfp_net_debugfs_dir_clean(&pf->ddir);
- nfp_net_irqs_disable(pf->pdev);
- kfree(pf->irq_entries);
+ nfp_net_pf_free_irqs(pf);
+ nfp_net_pf_app_clean(pf);
+ nfp_net_pci_unmap_mem(pf);
+}
- nfp_cpp_area_release_free(pf->rx_area);
- nfp_cpp_area_release_free(pf->tx_area);
- nfp_cpp_area_release_free(pf->ctrl_area);
+static int
+nfp_net_eth_port_update(struct nfp_cpp *cpp, struct nfp_port *port,
+ struct nfp_eth_table *eth_table)
+{
+ struct nfp_eth_table_port *eth_port;
+
+ ASSERT_RTNL();
+
+ eth_port = nfp_net_find_port(eth_table, port->eth_id);
+ if (!eth_port) {
+ set_bit(NFP_PORT_CHANGED, &port->flags);
+ nfp_warn(cpp, "Warning: port #%d not present after reconfig\n",
+ port->eth_id);
+ return -EIO;
+ }
+ if (eth_port->override_changed) {
+ nfp_warn(cpp, "Port #%d config changed, unregistering. Reboot required before port will be operational again.\n", port->eth_id);
+ port->type = NFP_PORT_INVALID;
+ }
+
+ memcpy(port->eth_port, eth_port, sizeof(*eth_port));
+
+ return 0;
}
-static void nfp_net_refresh_netdevs(struct work_struct *work)
+int nfp_net_refresh_port_table_sync(struct nfp_pf *pf)
{
- struct nfp_pf *pf = container_of(work, struct nfp_pf,
- port_refresh_work);
struct nfp_eth_table *eth_table;
struct nfp_net *nn, *next;
+ struct nfp_port *port;
- mutex_lock(&pf->port_lock);
+ lockdep_assert_held(&pf->lock);
/* Check for nfp_net_pci_remove() racing against us */
- if (list_empty(&pf->ports))
- goto out;
+ if (list_empty(&pf->vnics))
+ return 0;
- list_for_each_entry(nn, &pf->ports, port_list)
- nfp_net_link_changed_read_clear(nn);
+ /* Update state of all ports */
+ rtnl_lock();
+ list_for_each_entry(port, &pf->ports, port_list)
+ clear_bit(NFP_PORT_CHANGED, &port->flags);
eth_table = nfp_eth_read_ports(pf->cpp);
if (!eth_table) {
+ list_for_each_entry(port, &pf->ports, port_list)
+ if (__nfp_port_get_eth_port(port))
+ set_bit(NFP_PORT_CHANGED, &port->flags);
+ rtnl_unlock();
nfp_err(pf->cpp, "Error refreshing port config!\n");
- goto out;
+ return -EIO;
}
- rtnl_lock();
- list_for_each_entry(nn, &pf->ports, port_list) {
- if (!nn->eth_port)
- continue;
- nn->eth_port = nfp_net_find_port(eth_table,
- nn->eth_port->eth_index);
- }
+ list_for_each_entry(port, &pf->ports, port_list)
+ if (__nfp_port_get_eth_port(port))
+ nfp_net_eth_port_update(pf->cpp, port, eth_table);
rtnl_unlock();
- kfree(pf->eth_tbl);
- pf->eth_tbl = eth_table;
+ kfree(eth_table);
- list_for_each_entry_safe(nn, next, &pf->ports, port_list) {
- if (!nn->eth_port) {
- nfp_warn(pf->cpp, "Warning: port not present after reconfig\n");
- continue;
- }
- if (!nn->eth_port->override_changed)
+ /* Shoot off the ports which became invalid */
+ list_for_each_entry_safe(nn, next, &pf->vnics, vnic_list) {
+ if (!nn->port || nn->port->type != NFP_PORT_INVALID)
continue;
- nn_warn(nn, "Port config changed, unregistering. Reboot required before port will be operational again.\n");
-
- nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
- nfp_net_netdev_clean(nn->dp.netdev);
-
- list_del(&nn->port_list);
- pf->num_netdevs--;
- nfp_net_netdev_free(nn);
+ nfp_net_pf_clean_vnic(pf, nn);
+ nfp_net_pf_free_vnic(pf, nn);
}
- if (list_empty(&pf->ports))
+ if (list_empty(&pf->vnics))
nfp_net_pci_remove_finish(pf);
-out:
- mutex_unlock(&pf->port_lock);
+
+ return 0;
}
-void nfp_net_refresh_port_table(struct nfp_net *nn)
+static void nfp_net_refresh_vnics(struct work_struct *work)
{
- struct nfp_pf *pf = pci_get_drvdata(nn->pdev);
+ struct nfp_pf *pf = container_of(work, struct nfp_pf,
+ port_refresh_work);
- schedule_work(&pf->port_refresh_work);
+ mutex_lock(&pf->lock);
+ nfp_net_refresh_port_table_sync(pf);
+ mutex_unlock(&pf->lock);
}
-int nfp_net_refresh_eth_port(struct nfp_net *nn)
+void nfp_net_refresh_port_table(struct nfp_port *port)
{
- struct nfp_eth_table_port *eth_port;
+ struct nfp_pf *pf = port->app->pf;
+
+ set_bit(NFP_PORT_CHANGED, &port->flags);
+
+ queue_work(pf->wq, &pf->port_refresh_work);
+}
+
+int nfp_net_refresh_eth_port(struct nfp_port *port)
+{
+ struct nfp_cpp *cpp = port->app->cpp;
struct nfp_eth_table *eth_table;
+ int ret;
- eth_table = nfp_eth_read_ports(nn->cpp);
- if (!eth_table) {
- nn_err(nn, "Error refreshing port state table!\n");
- return -EIO;
- }
+ clear_bit(NFP_PORT_CHANGED, &port->flags);
- eth_port = nfp_net_find_port(eth_table, nn->eth_port->eth_index);
- if (!eth_port) {
- nn_err(nn, "Error finding state of the port!\n");
- kfree(eth_table);
+ eth_table = nfp_eth_read_ports(cpp);
+ if (!eth_table) {
+ set_bit(NFP_PORT_CHANGED, &port->flags);
+ nfp_err(cpp, "Error refreshing port state table!\n");
return -EIO;
}
- memcpy(nn->eth_port, eth_port, sizeof(*eth_port));
+ ret = nfp_net_eth_port_update(cpp, port, eth_table);
kfree(eth_table);
- return 0;
+ return ret;
}
/*
@@ -576,38 +688,49 @@ int nfp_net_refresh_eth_port(struct nfp_net *nn)
*/
int nfp_net_pci_probe(struct nfp_pf *pf)
{
- u8 __iomem *ctrl_bar, *tx_bar, *rx_bar;
- u32 total_tx_qcs, total_rx_qcs;
struct nfp_net_fw_version fw_ver;
- u32 tx_area_sz, rx_area_sz;
- u32 start_q;
+ u8 __iomem *ctrl_bar, *qc_bar;
int stride;
int err;
- INIT_WORK(&pf->port_refresh_work, nfp_net_refresh_netdevs);
- mutex_init(&pf->port_lock);
+ INIT_WORK(&pf->port_refresh_work, nfp_net_refresh_vnics);
/* Verify that the board has completed initialization */
- if (!nfp_is_ready(pf->cpp)) {
+ if (!nfp_is_ready(pf)) {
nfp_err(pf->cpp, "NFP is not ready for NIC operation.\n");
return -EINVAL;
}
- mutex_lock(&pf->port_lock);
- pf->num_ports = nfp_net_pf_get_num_ports(pf);
+ if (!pf->rtbl) {
+ nfp_err(pf->cpp, "No %s, giving up.\n",
+ pf->fw_loaded ? "symbol table" : "firmware found");
+ return -EPROBE_DEFER;
+ }
- ctrl_bar = nfp_net_pf_map_ctrl_bar(pf);
- if (!ctrl_bar) {
- err = pf->fw_loaded ? -EINVAL : -EPROBE_DEFER;
+ mutex_lock(&pf->lock);
+ pf->max_data_vnics = nfp_net_pf_get_num_ports(pf);
+ if ((int)pf->max_data_vnics < 0) {
+ err = pf->max_data_vnics;
goto err_unlock;
}
+ err = nfp_net_pci_map_mem(pf);
+ if (err)
+ goto err_unlock;
+
+ ctrl_bar = nfp_cpp_area_iomem(pf->data_vnic_bar);
+ qc_bar = nfp_cpp_area_iomem(pf->qc_area);
+ if (!ctrl_bar || !qc_bar) {
+ err = -EIO;
+ goto err_unmap;
+ }
+
nfp_net_get_fw_version(&fw_ver, ctrl_bar);
if (fw_ver.resv || fw_ver.class != NFP_NET_CFG_VERSION_CLASS_GENERIC) {
nfp_err(pf->cpp, "Unknown Firmware ABI %d.%d.%d.%d\n",
fw_ver.resv, fw_ver.class, fw_ver.major, fw_ver.minor);
err = -EINVAL;
- goto err_ctrl_unmap;
+ goto err_unmap;
}
/* Determine stride */
@@ -616,7 +739,7 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
nfp_warn(pf->cpp, "OBSOLETE Firmware detected - VF isolation not available\n");
} else {
switch (fw_ver.major) {
- case 1 ... 4:
+ case 1 ... 5:
stride = 4;
break;
default:
@@ -624,69 +747,51 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
fw_ver.resv, fw_ver.class,
fw_ver.major, fw_ver.minor);
err = -EINVAL;
- goto err_ctrl_unmap;
+ goto err_unmap;
}
}
- /* Find how many QC structs need to be mapped */
- total_tx_qcs = nfp_net_pf_total_qcs(pf, ctrl_bar, stride,
- NFP_NET_CFG_START_TXQ,
- NFP_NET_CFG_MAX_TXRINGS);
- total_rx_qcs = nfp_net_pf_total_qcs(pf, ctrl_bar, stride,
- NFP_NET_CFG_START_RXQ,
- NFP_NET_CFG_MAX_RXRINGS);
- if (!total_tx_qcs || !total_rx_qcs) {
- nfp_err(pf->cpp, "Invalid PF QC configuration [%d,%d]\n",
- total_tx_qcs, total_rx_qcs);
- err = -EINVAL;
- goto err_ctrl_unmap;
- }
-
- tx_area_sz = NFP_QCP_QUEUE_ADDR_SZ * total_tx_qcs;
- rx_area_sz = NFP_QCP_QUEUE_ADDR_SZ * total_rx_qcs;
-
- /* Map TX queues */
- start_q = readl(ctrl_bar + NFP_NET_CFG_START_TXQ);
- tx_bar = nfp_net_map_area(pf->cpp, "net.tx", 0, 0,
- NFP_PCIE_QUEUE(start_q),
- tx_area_sz, &pf->tx_area);
- if (IS_ERR(tx_bar)) {
- nfp_err(pf->cpp, "Failed to map TX area.\n");
- err = PTR_ERR(tx_bar);
- goto err_ctrl_unmap;
- }
-
- /* Map RX queues */
- start_q = readl(ctrl_bar + NFP_NET_CFG_START_RXQ);
- rx_bar = nfp_net_map_area(pf->cpp, "net.rx", 0, 0,
- NFP_PCIE_QUEUE(start_q),
- rx_area_sz, &pf->rx_area);
- if (IS_ERR(rx_bar)) {
- nfp_err(pf->cpp, "Failed to map RX area.\n");
- err = PTR_ERR(rx_bar);
- goto err_unmap_tx;
- }
+ err = nfp_net_pf_app_init(pf, qc_bar, stride);
+ if (err)
+ goto err_unmap;
pf->ddir = nfp_net_debugfs_device_add(pf->pdev);
- err = nfp_net_pf_spawn_netdevs(pf, ctrl_bar, tx_bar, rx_bar,
- stride, &fw_ver);
+ /* Allocate the vnics and do basic init */
+ err = nfp_net_pf_alloc_vnics(pf, ctrl_bar, qc_bar, stride);
if (err)
goto err_clean_ddir;
- mutex_unlock(&pf->port_lock);
+ err = nfp_net_pf_alloc_irqs(pf);
+ if (err)
+ goto err_free_vnics;
+
+ err = nfp_net_pf_app_start(pf);
+ if (err)
+ goto err_free_irqs;
+
+ err = nfp_net_pf_init_vnics(pf);
+ if (err)
+ goto err_stop_app;
+
+ mutex_unlock(&pf->lock);
return 0;
+err_stop_app:
+ nfp_net_pf_app_stop(pf);
+err_free_irqs:
+ nfp_net_pf_free_irqs(pf);
+err_free_vnics:
+ nfp_net_pf_free_vnics(pf);
err_clean_ddir:
nfp_net_debugfs_dir_clean(&pf->ddir);
- nfp_cpp_area_release_free(pf->rx_area);
-err_unmap_tx:
- nfp_cpp_area_release_free(pf->tx_area);
-err_ctrl_unmap:
- nfp_cpp_area_release_free(pf->ctrl_area);
+ nfp_net_pf_app_clean(pf);
+err_unmap:
+ nfp_net_pci_unmap_mem(pf);
err_unlock:
- mutex_unlock(&pf->port_lock);
+ mutex_unlock(&pf->lock);
+ cancel_work_sync(&pf->port_refresh_work);
return err;
}
@@ -694,21 +799,19 @@ void nfp_net_pci_remove(struct nfp_pf *pf)
{
struct nfp_net *nn;
- mutex_lock(&pf->port_lock);
- if (list_empty(&pf->ports))
+ mutex_lock(&pf->lock);
+ if (list_empty(&pf->vnics))
goto out;
- list_for_each_entry(nn, &pf->ports, port_list) {
- nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
-
- nfp_net_netdev_clean(nn->dp.netdev);
- }
+ list_for_each_entry(nn, &pf->vnics, vnic_list)
+ if (nfp_net_is_data_vnic(nn))
+ nfp_net_pf_clean_vnic(pf, nn);
- nfp_net_pf_free_netdevs(pf);
+ nfp_net_pf_free_vnics(pf);
nfp_net_pci_remove_finish(pf);
out:
- mutex_unlock(&pf->port_lock);
+ mutex_unlock(&pf->lock);
cancel_work_sync(&pf->port_refresh_work);
}
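The loop in nfp_net_pf_alloc_irqs() above spreads however many MSI-X vectors were granted across the vNICs without over-assigning any of them. A worked sketch of the per-vNIC share (min() and DIV_ROUND_UP() as in the kernel; the helper is illustrative):

	/* Two vNICs each wanting 10 vectors with only 12 granted:
	 * the first gets min(10, DIV_ROUND_UP(12, 2)) == 6 and the
	 * second min(10, DIV_ROUND_UP(6, 1)) == 6.
	 */
	static unsigned int vnic_irq_share(unsigned int wanted,
					   unsigned int irqs_left,
					   unsigned int vnics_left)
	{
		return min(wanted, DIV_ROUND_UP(irqs_left, vnics_left));
	}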
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
new file mode 100644
index 000000000000..8ec5474f4b18
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
@@ -0,0 +1,396 @@
+/*
+ * Copyright (C) 2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/io-64-nonatomic-hi-lo.h>
+#include <linux/lockdep.h>
+#include <net/dst_metadata.h>
+#include <net/switchdev.h>
+
+#include "nfpcore/nfp_cpp.h"
+#include "nfpcore/nfp_nsp.h"
+#include "nfp_app.h"
+#include "nfp_main.h"
+#include "nfp_net_ctrl.h"
+#include "nfp_net_repr.h"
+#include "nfp_port.h"
+
+static void
+nfp_repr_inc_tx_stats(struct net_device *netdev, unsigned int len,
+ int tx_status)
+{
+ struct nfp_repr *repr = netdev_priv(netdev);
+ struct nfp_repr_pcpu_stats *stats;
+
+ if (unlikely(tx_status != NET_XMIT_SUCCESS &&
+ tx_status != NET_XMIT_CN)) {
+ this_cpu_inc(repr->stats->tx_drops);
+ return;
+ }
+
+ stats = this_cpu_ptr(repr->stats);
+ u64_stats_update_begin(&stats->syncp);
+ stats->tx_packets++;
+ stats->tx_bytes += len;
+ u64_stats_update_end(&stats->syncp);
+}
+
+void nfp_repr_inc_rx_stats(struct net_device *netdev, unsigned int len)
+{
+ struct nfp_repr *repr = netdev_priv(netdev);
+ struct nfp_repr_pcpu_stats *stats;
+
+ stats = this_cpu_ptr(repr->stats);
+ u64_stats_update_begin(&stats->syncp);
+ stats->rx_packets++;
+ stats->rx_bytes += len;
+ u64_stats_update_end(&stats->syncp);
+}
+
+static void
+nfp_repr_phy_port_get_stats64(const struct nfp_app *app, u8 phy_port,
+ struct rtnl_link_stats64 *stats)
+{
+ u8 __iomem *mem;
+
+ mem = app->pf->mac_stats_mem + phy_port * NFP_MAC_STATS_SIZE;
+
+ /* TX and RX stats are flipped as we are returning the stats as seen
+ * at the switch port corresponding to the phys port.
+ */
+ stats->tx_packets = readq(mem + NFP_MAC_STATS_RX_FRAMES_RECEIVED_OK);
+ stats->tx_bytes = readq(mem + NFP_MAC_STATS_RX_IN_OCTETS);
+ stats->tx_dropped = readq(mem + NFP_MAC_STATS_RX_IN_ERRORS);
+
+ stats->rx_packets = readq(mem + NFP_MAC_STATS_TX_FRAMES_TRANSMITTED_OK);
+ stats->rx_bytes = readq(mem + NFP_MAC_STATS_TX_OUT_OCTETS);
+ stats->rx_dropped = readq(mem + NFP_MAC_STATS_TX_OUT_ERRORS);
+}
+
+static void
+nfp_repr_vf_get_stats64(const struct nfp_app *app, u8 vf,
+ struct rtnl_link_stats64 *stats)
+{
+ u8 __iomem *mem;
+
+ mem = app->pf->vf_cfg_mem + vf * NFP_NET_CFG_BAR_SZ;
+
+ /* TX and RX stats are flipped as we are returning the stats as seen
+ * at the switch port corresponding to the VF.
+ */
+ stats->tx_packets = readq(mem + NFP_NET_CFG_STATS_RX_FRAMES);
+ stats->tx_bytes = readq(mem + NFP_NET_CFG_STATS_RX_OCTETS);
+ stats->tx_dropped = readq(mem + NFP_NET_CFG_STATS_RX_DISCARDS);
+
+ stats->rx_packets = readq(mem + NFP_NET_CFG_STATS_TX_FRAMES);
+ stats->rx_bytes = readq(mem + NFP_NET_CFG_STATS_TX_OCTETS);
+ stats->rx_dropped = readq(mem + NFP_NET_CFG_STATS_TX_DISCARDS);
+}
+
+static void
+nfp_repr_pf_get_stats64(const struct nfp_app *app, u8 pf,
+ struct rtnl_link_stats64 *stats)
+{
+ u8 __iomem *mem;
+
+ if (pf)
+ return;
+
+ mem = nfp_cpp_area_iomem(app->pf->data_vnic_bar);
+
+ stats->tx_packets = readq(mem + NFP_NET_CFG_STATS_RX_FRAMES);
+ stats->tx_bytes = readq(mem + NFP_NET_CFG_STATS_RX_OCTETS);
+ stats->tx_dropped = readq(mem + NFP_NET_CFG_STATS_RX_DISCARDS);
+
+ stats->rx_packets = readq(mem + NFP_NET_CFG_STATS_TX_FRAMES);
+ stats->rx_bytes = readq(mem + NFP_NET_CFG_STATS_TX_OCTETS);
+ stats->rx_dropped = readq(mem + NFP_NET_CFG_STATS_TX_DISCARDS);
+}
+
+static void
+nfp_repr_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
+{
+ struct nfp_repr *repr = netdev_priv(netdev);
+ struct nfp_eth_table_port *eth_port;
+ struct nfp_app *app = repr->app;
+
+ if (WARN_ON(!repr->port))
+ return;
+
+ switch (repr->port->type) {
+ case NFP_PORT_PHYS_PORT:
+ eth_port = __nfp_port_get_eth_port(repr->port);
+ if (!eth_port)
+ break;
+ nfp_repr_phy_port_get_stats64(app, eth_port->index, stats);
+ break;
+ case NFP_PORT_PF_PORT:
+ nfp_repr_pf_get_stats64(app, repr->port->pf_id, stats);
+ break;
+ case NFP_PORT_VF_PORT:
+ nfp_repr_vf_get_stats64(app, repr->port->vf_id, stats);
+ default:
+ break;
+ }
+}
+
+static bool
+nfp_repr_has_offload_stats(const struct net_device *dev, int attr_id)
+{
+ switch (attr_id) {
+ case IFLA_OFFLOAD_XSTATS_CPU_HIT:
+ return true;
+ }
+
+ return false;
+}
+
+static int
+nfp_repr_get_host_stats64(const struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct nfp_repr *repr = netdev_priv(netdev);
+ int i;
+
+ for_each_possible_cpu(i) {
+ u64 tbytes, tpkts, tdrops, rbytes, rpkts;
+ struct nfp_repr_pcpu_stats *repr_stats;
+ unsigned int start;
+
+ repr_stats = per_cpu_ptr(repr->stats, i);
+ do {
+ start = u64_stats_fetch_begin_irq(&repr_stats->syncp);
+ tbytes = repr_stats->tx_bytes;
+ tpkts = repr_stats->tx_packets;
+ tdrops = repr_stats->tx_drops;
+ rbytes = repr_stats->rx_bytes;
+ rpkts = repr_stats->rx_packets;
+ } while (u64_stats_fetch_retry_irq(&repr_stats->syncp, start));
+
+ stats->tx_bytes += tbytes;
+ stats->tx_packets += tpkts;
+ stats->tx_dropped += tdrops;
+ stats->rx_bytes += rbytes;
+ stats->rx_packets += rpkts;
+ }
+
+ return 0;
+}
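
The per-CPU statistics above follow the usual u64_stats pattern: writers bump counters between u64_stats_update_begin()/u64_stats_update_end(), and readers retry with the _irq fetch helpers until they observe a consistent snapshot. A minimal self-contained sketch of the same pattern (hypothetical names, not part of this patch):

#include <linux/percpu.h>
#include <linux/u64_stats_sync.h>

struct ex_pcpu_stats {
	u64 packets;
	struct u64_stats_sync syncp;
};

/* Writer: per-CPU, so no lock is needed around the increment */
static void ex_stats_inc(struct ex_pcpu_stats __percpu *stats)
{
	struct ex_pcpu_stats *s = this_cpu_ptr(stats);

	u64_stats_update_begin(&s->syncp);
	s->packets++;
	u64_stats_update_end(&s->syncp);
}

/* Reader: sum all CPUs, retrying any CPU whose writer raced with us */
static u64 ex_stats_read(struct ex_pcpu_stats __percpu *stats)
{
	u64 total = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct ex_pcpu_stats *s = per_cpu_ptr(stats, cpu);
		unsigned int start;
		u64 pkts;

		do {
			start = u64_stats_fetch_begin_irq(&s->syncp);
			pkts = s->packets;
		} while (u64_stats_fetch_retry_irq(&s->syncp, start));

		total += pkts;
	}

	return total;
}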
+
+static int
+nfp_repr_get_offload_stats(int attr_id, const struct net_device *dev,
+ void *stats)
+{
+ switch (attr_id) {
+ case IFLA_OFFLOAD_XSTATS_CPU_HIT:
+ return nfp_repr_get_host_stats64(dev, stats);
+ }
+
+ return -EINVAL;
+}
+
+static netdev_tx_t nfp_repr_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct nfp_repr *repr = netdev_priv(netdev);
+ unsigned int len = skb->len;
+ int ret;
+
+ skb_dst_drop(skb);
+ dst_hold((struct dst_entry *)repr->dst);
+ skb_dst_set(skb, (struct dst_entry *)repr->dst);
+ skb->dev = repr->dst->u.port_info.lower_dev;
+
+ ret = dev_queue_xmit(skb);
+ nfp_repr_inc_tx_stats(netdev, len, ret);
+
+ return ret;
+}
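
For context: nfp_repr_xmit() attaches a METADATA_HW_PORT_MUX dst carrying the representor's cmsg port id and then requeues the skb on the lower (PF) netdev. A hedged sketch of how code on the lower device could recover that id (hypothetical helper, not part of this patch):

#include <net/dst_metadata.h>

static u32 ex_skb_mux_id(struct sk_buff *skb)
{
	struct metadata_dst *md_dst = skb_metadata_dst(skb);

	/* Only trust the id if the dst is the HW port mux variety */
	if (md_dst && md_dst->type == METADATA_HW_PORT_MUX)
		return md_dst->u.port_info.port_id;

	return 0;
}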
+
+static int nfp_repr_stop(struct net_device *netdev)
+{
+ struct nfp_repr *repr = netdev_priv(netdev);
+
+ return nfp_app_repr_stop(repr->app, repr);
+}
+
+static int nfp_repr_open(struct net_device *netdev)
+{
+ struct nfp_repr *repr = netdev_priv(netdev);
+
+ return nfp_app_repr_open(repr->app, repr);
+}
+
+const struct net_device_ops nfp_repr_netdev_ops = {
+ .ndo_open = nfp_repr_open,
+ .ndo_stop = nfp_repr_stop,
+ .ndo_start_xmit = nfp_repr_xmit,
+ .ndo_get_stats64 = nfp_repr_get_stats64,
+ .ndo_has_offload_stats = nfp_repr_has_offload_stats,
+ .ndo_get_offload_stats = nfp_repr_get_offload_stats,
+ .ndo_get_phys_port_name = nfp_port_get_phys_port_name,
+ .ndo_setup_tc = nfp_port_setup_tc,
+};
+
+static void nfp_repr_clean(struct nfp_repr *repr)
+{
+ unregister_netdev(repr->netdev);
+ dst_release((struct dst_entry *)repr->dst);
+ nfp_port_free(repr->port);
+}
+
+static struct lock_class_key nfp_repr_netdev_xmit_lock_key;
+static struct lock_class_key nfp_repr_netdev_addr_lock_key;
+
+static void nfp_repr_set_lockdep_class_one(struct net_device *dev,
+ struct netdev_queue *txq,
+ void *_unused)
+{
+ lockdep_set_class(&txq->_xmit_lock, &nfp_repr_netdev_xmit_lock_key);
+}
+
+static void nfp_repr_set_lockdep_class(struct net_device *dev)
+{
+ lockdep_set_class(&dev->addr_list_lock, &nfp_repr_netdev_addr_lock_key);
+ netdev_for_each_tx_queue(dev, nfp_repr_set_lockdep_class_one, NULL);
+}
+
+int nfp_repr_init(struct nfp_app *app, struct net_device *netdev,
+ u32 cmsg_port_id, struct nfp_port *port,
+ struct net_device *pf_netdev)
+{
+ struct nfp_repr *repr = netdev_priv(netdev);
+ int err;
+
+ nfp_repr_set_lockdep_class(netdev);
+
+ repr->port = port;
+ repr->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX, GFP_KERNEL);
+ if (!repr->dst)
+ return -ENOMEM;
+ repr->dst->u.port_info.port_id = cmsg_port_id;
+ repr->dst->u.port_info.lower_dev = pf_netdev;
+
+ netdev->netdev_ops = &nfp_repr_netdev_ops;
+ SWITCHDEV_SET_OPS(netdev, &nfp_port_switchdev_ops);
+
+ if (nfp_app_has_tc(app)) {
+ netdev->features |= NETIF_F_HW_TC;
+ netdev->hw_features |= NETIF_F_HW_TC;
+ }
+
+ err = register_netdev(netdev);
+ if (err)
+ goto err_clean;
+
+ return 0;
+
+err_clean:
+ dst_release((struct dst_entry *)repr->dst);
+ return err;
+}
+
+static void nfp_repr_free(struct nfp_repr *repr)
+{
+ free_percpu(repr->stats);
+ free_netdev(repr->netdev);
+}
+
+struct net_device *nfp_repr_alloc(struct nfp_app *app)
+{
+ struct net_device *netdev;
+ struct nfp_repr *repr;
+
+ netdev = alloc_etherdev(sizeof(*repr));
+ if (!netdev)
+ return NULL;
+
+ repr = netdev_priv(netdev);
+ repr->netdev = netdev;
+ repr->app = app;
+
+ repr->stats = netdev_alloc_pcpu_stats(struct nfp_repr_pcpu_stats);
+ if (!repr->stats)
+ goto err_free_netdev;
+
+ return netdev;
+
+err_free_netdev:
+ free_netdev(netdev);
+ return NULL;
+}
+
+static void nfp_repr_clean_and_free(struct nfp_repr *repr)
+{
+ nfp_info(repr->app->cpp, "Destroying Representor(%s)\n",
+ repr->netdev->name);
+ nfp_repr_clean(repr);
+ nfp_repr_free(repr);
+}
+
+void nfp_reprs_clean_and_free(struct nfp_reprs *reprs)
+{
+ unsigned int i;
+
+ for (i = 0; i < reprs->num_reprs; i++)
+ if (reprs->reprs[i])
+ nfp_repr_clean_and_free(netdev_priv(reprs->reprs[i]));
+
+ kfree(reprs);
+}
+
+void
+nfp_reprs_clean_and_free_by_type(struct nfp_app *app,
+ enum nfp_repr_type type)
+{
+ struct nfp_reprs *reprs;
+
+ reprs = nfp_app_reprs_set(app, type, NULL);
+ if (!reprs)
+ return;
+
+ synchronize_rcu();
+ nfp_reprs_clean_and_free(reprs);
+}
+
+struct nfp_reprs *nfp_reprs_alloc(unsigned int num_reprs)
+{
+ struct nfp_reprs *reprs;
+
+ reprs = kzalloc(sizeof(*reprs) +
+ num_reprs * sizeof(struct net_device *), GFP_KERNEL);
+ if (!reprs)
+ return NULL;
+ reprs->num_reprs = num_reprs;
+
+ return reprs;
+}
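
A note on lifetime: nfp_reprs_clean_and_free_by_type() swaps the table out via nfp_app_reprs_set() and then calls synchronize_rcu(), which implies lookups happen under rcu_read_lock(). An illustrative bounds-checked lookup under that assumption (hypothetical helper):

/* Caller is assumed to hold rcu_read_lock() while using the result */
static struct net_device *
ex_repr_get(struct nfp_reprs *reprs, unsigned int id)
{
	if (!reprs || id >= reprs->num_reprs)
		return NULL;

	return reprs->reprs[id];
}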
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h
new file mode 100644
index 000000000000..32179cad062a
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h
@@ -0,0 +1,128 @@
+/*
+ * Copyright (C) 2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef NFP_NET_REPR_H
+#define NFP_NET_REPR_H
+
+struct metadata_dst;
+struct nfp_net;
+struct nfp_port;
+
+#include <net/dst_metadata.h>
+
+/**
+ * struct nfp_reprs - container for representor netdevs
+ * @num_reprs: Number of elements in reprs array
+ * @reprs: Array of representor netdevs
+ */
+struct nfp_reprs {
+ unsigned int num_reprs;
+ struct net_device *reprs[0];
+};
+
+/**
+ * struct nfp_repr_pcpu_stats
+ * @rx_packets: Received packets
+ * @rx_bytes: Received bytes
+ * @tx_packets: Transmitted packets
+ * @tx_bytes: Transmitted bytes
+ * @tx_drops: Packets dropped on transmit
+ * @syncp: Synchronization point for 64-bit stats reads
+ */
+struct nfp_repr_pcpu_stats {
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 tx_packets;
+ u64 tx_bytes;
+ u64 tx_drops;
+ struct u64_stats_sync syncp;
+};
+
+/**
+ * struct nfp_repr - priv data for representor netdevs
+ * @netdev: Back pointer to netdev
+ * @dst: Destination for packet TX
+ * @port: Port of representor
+ * @app: APP handle
+ * @stats: Statistic of packets hitting CPU
+ */
+struct nfp_repr {
+ struct net_device *netdev;
+ struct metadata_dst *dst;
+ struct nfp_port *port;
+ struct nfp_app *app;
+ struct nfp_repr_pcpu_stats __percpu *stats;
+};
+
+/**
+ * enum nfp_repr_type - type of representor
+ * @NFP_REPR_TYPE_PHYS_PORT: external NIC port
+ * @NFP_REPR_TYPE_PF: physical function
+ * @NFP_REPR_TYPE_VF: virtual function
+ */
+enum nfp_repr_type {
+ NFP_REPR_TYPE_PHYS_PORT,
+ NFP_REPR_TYPE_PF,
+ NFP_REPR_TYPE_VF,
+
+ __NFP_REPR_TYPE_MAX,
+};
+#define NFP_REPR_TYPE_MAX (__NFP_REPR_TYPE_MAX - 1)
+
+extern const struct net_device_ops nfp_repr_netdev_ops;
+
+static inline bool nfp_netdev_is_nfp_repr(struct net_device *netdev)
+{
+ return netdev->netdev_ops == &nfp_repr_netdev_ops;
+}
+
+static inline int nfp_repr_get_port_id(struct net_device *netdev)
+{
+ struct nfp_repr *priv = netdev_priv(netdev);
+
+ return priv->dst->u.port_info.port_id;
+}
+
+void nfp_repr_inc_rx_stats(struct net_device *netdev, unsigned int len);
+int nfp_repr_init(struct nfp_app *app, struct net_device *netdev,
+ u32 cmsg_port_id, struct nfp_port *port,
+ struct net_device *pf_netdev);
+struct net_device *nfp_repr_alloc(struct nfp_app *app);
+void nfp_reprs_clean_and_free(struct nfp_reprs *reprs);
+void nfp_reprs_clean_and_free_by_type(struct nfp_app *app,
+				       enum nfp_repr_type type);
+struct nfp_reprs *nfp_reprs_alloc(unsigned int num_reprs);
+
+#endif /* NFP_NET_REPR_H */
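
A short illustration of how a caller might combine the two inline helpers above (hypothetical function, not part of this patch):

static int ex_port_id_of(struct net_device *netdev)
{
	/* Guard with the ops check before trusting the netdev_priv() layout */
	if (!nfp_netdev_is_nfp_repr(netdev))
		return -EINVAL;

	return nfp_repr_get_port_id(netdev);
}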
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
index 86e61be6f35c..c879626e035b 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
@@ -161,7 +161,7 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
dev_warn(&pdev->dev, "OBSOLETE Firmware detected - VF isolation not available\n");
} else {
switch (fw_ver.major) {
- case 1 ... 4:
+ case 1 ... 5:
stride = 4;
tx_bar_no = NFP_NET_Q0_BAR;
rx_bar_no = tx_bar_no;
@@ -202,7 +202,7 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
rx_bar_off = NFP_PCIE_QUEUE(startq);
/* Allocate and initialise the netdev */
- nn = nfp_net_netdev_alloc(pdev, max_tx_rings, max_rx_rings);
+ nn = nfp_net_alloc(pdev, true, max_tx_rings, max_rx_rings);
if (IS_ERR(nn)) {
err = PTR_ERR(nn);
goto err_ctrl_unmap;
@@ -267,7 +267,7 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
nfp_netvf_get_mac_addr(nn);
num_irqs = nfp_net_irqs_alloc(pdev, vf->irq_entries,
- NFP_NET_MIN_PORT_IRQS,
+ NFP_NET_MIN_VNIC_IRQS,
NFP_NET_NON_Q_VECTORS +
nn->dp.num_r_vecs);
if (!num_irqs) {
@@ -283,13 +283,13 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
*/
nn->me_freq_mhz = 1200;
- err = nfp_net_netdev_init(nn->dp.netdev);
+ err = nfp_net_init(nn);
if (err)
goto err_irqs_disable;
nfp_net_info(nn);
vf->ddir = nfp_net_debugfs_device_add(pdev);
- nfp_net_debugfs_port_add(nn, vf->ddir, 0);
+ nfp_net_debugfs_vnic_add(nn, vf->ddir, 0);
return 0;
@@ -304,7 +304,7 @@ err_unmap_tx:
else
iounmap(vf->q_bar);
err_netdev_free:
- nfp_net_netdev_free(nn);
+ nfp_net_free(nn);
err_ctrl_unmap:
iounmap(ctrl_bar);
err_pci_regions:
@@ -328,7 +328,7 @@ static void nfp_netvf_pci_remove(struct pci_dev *pdev)
nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
nfp_net_debugfs_dir_clean(&vf->ddir);
- nfp_net_netdev_clean(nn->dp.netdev);
+ nfp_net_clean(nn);
nfp_net_irqs_disable(pdev);
@@ -340,7 +340,7 @@ static void nfp_netvf_pci_remove(struct pci_dev *pdev)
}
iounmap(nn->dp.ctrl_bar);
- nfp_net_netdev_free(nn);
+ nfp_net_free(nn);
pci_release_regions(pdev);
pci_disable_device(pdev);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_port.c b/drivers/net/ethernet/netronome/nfp/nfp_port.c
new file mode 100644
index 000000000000..e42644dbb865
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfp_port.c
@@ -0,0 +1,233 @@
+/*
+ * Copyright (C) 2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/lockdep.h>
+#include <net/switchdev.h>
+
+#include "nfpcore/nfp_cpp.h"
+#include "nfpcore/nfp_nsp.h"
+#include "nfp_app.h"
+#include "nfp_main.h"
+#include "nfp_net.h"
+#include "nfp_port.h"
+
+struct nfp_port *nfp_port_from_netdev(struct net_device *netdev)
+{
+ if (nfp_netdev_is_nfp_net(netdev)) {
+ struct nfp_net *nn = netdev_priv(netdev);
+
+ return nn->port;
+ }
+
+ if (nfp_netdev_is_nfp_repr(netdev)) {
+ struct nfp_repr *repr = netdev_priv(netdev);
+
+ return repr->port;
+ }
+
+ WARN(1, "Unknown netdev type for nfp_port\n");
+
+ return NULL;
+}
+
+static int
+nfp_port_attr_get(struct net_device *netdev, struct switchdev_attr *attr)
+{
+ struct nfp_port *port;
+
+ port = nfp_port_from_netdev(netdev);
+ if (!port)
+ return -EOPNOTSUPP;
+
+ switch (attr->id) {
+ case SWITCHDEV_ATTR_ID_PORT_PARENT_ID: {
+ const u8 *serial;
+ /* N.B: attr->u.ppid.id is binary data */
+ attr->u.ppid.id_len = nfp_cpp_serial(port->app->cpp, &serial);
+ memcpy(&attr->u.ppid.id, serial, attr->u.ppid.id_len);
+ break;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+const struct switchdev_ops nfp_port_switchdev_ops = {
+ .switchdev_port_attr_get = nfp_port_attr_get,
+};
+
+int nfp_port_setup_tc(struct net_device *netdev, u32 handle, u32 chain_index,
+ __be16 proto, struct tc_to_netdev *tc)
+{
+ struct nfp_port *port;
+
+ if (chain_index)
+ return -EOPNOTSUPP;
+
+ port = nfp_port_from_netdev(netdev);
+ if (!port)
+ return -EOPNOTSUPP;
+
+ return nfp_app_setup_tc(port->app, netdev, handle, proto, tc);
+}
+
+struct nfp_port *
+nfp_port_from_id(struct nfp_pf *pf, enum nfp_port_type type, unsigned int id)
+{
+ struct nfp_port *port;
+
+ lockdep_assert_held(&pf->lock);
+
+ if (type != NFP_PORT_PHYS_PORT)
+ return NULL;
+
+ list_for_each_entry(port, &pf->ports, port_list)
+ if (port->eth_id == id)
+ return port;
+
+ return NULL;
+}
+
+struct nfp_eth_table_port *__nfp_port_get_eth_port(struct nfp_port *port)
+{
+ if (!port)
+ return NULL;
+ if (port->type != NFP_PORT_PHYS_PORT)
+ return NULL;
+
+ return port->eth_port;
+}
+
+struct nfp_eth_table_port *nfp_port_get_eth_port(struct nfp_port *port)
+{
+ if (!__nfp_port_get_eth_port(port))
+ return NULL;
+
+ if (test_bit(NFP_PORT_CHANGED, &port->flags))
+ if (nfp_net_refresh_eth_port(port))
+ return NULL;
+
+ return __nfp_port_get_eth_port(port);
+}
+
+int
+nfp_port_get_phys_port_name(struct net_device *netdev, char *name, size_t len)
+{
+ struct nfp_eth_table_port *eth_port;
+ struct nfp_port *port;
+ int n;
+
+ port = nfp_port_from_netdev(netdev);
+ if (!port)
+ return -EOPNOTSUPP;
+
+ switch (port->type) {
+ case NFP_PORT_PHYS_PORT:
+ eth_port = __nfp_port_get_eth_port(port);
+ if (!eth_port)
+ return -EOPNOTSUPP;
+
+ if (!eth_port->is_split)
+ n = snprintf(name, len, "p%d", eth_port->label_port);
+ else
+ n = snprintf(name, len, "p%ds%d", eth_port->label_port,
+ eth_port->label_subport);
+ break;
+ case NFP_PORT_PF_PORT:
+ n = snprintf(name, len, "pf%d", port->pf_id);
+ break;
+ case NFP_PORT_VF_PORT:
+ n = snprintf(name, len, "pf%dvf%d", port->pf_id, port->vf_id);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ if (n >= len)
+ return -EINVAL;
+
+ return 0;
+}
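
The generated names are "p<port>" for a whole physical port, "p<port>s<subport>" for a split port, "pf<N>" for the PF and "pf<N>vf<M>" for a VF. A hypothetical caller, for illustration:

static void ex_print_phys_name(struct net_device *netdev)
{
	char name[IFNAMSIZ];

	/* Yields e.g. "p0", "p0s1", "pf0" or "pf0vf3" on success */
	if (!nfp_port_get_phys_port_name(netdev, name, sizeof(name)))
		netdev_info(netdev, "phys port name: %s\n", name);
}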
+
+int nfp_port_init_phy_port(struct nfp_pf *pf, struct nfp_app *app,
+ struct nfp_port *port, unsigned int id)
+{
+ /* Check if the vNIC has an associated external port and its config is OK */
+ if (!pf->eth_tbl || id >= pf->eth_tbl->count) {
+ nfp_err(app->cpp,
+ "NSP port entries don't match vNICs (no entry %d)\n",
+ id);
+ return -EINVAL;
+ }
+ if (pf->eth_tbl->ports[id].override_changed) {
+ nfp_warn(app->cpp,
+ "Config changed for port #%d, reboot required before port will be operational\n",
+ pf->eth_tbl->ports[id].index);
+ port->type = NFP_PORT_INVALID;
+ return 0;
+ }
+
+ port->eth_port = &pf->eth_tbl->ports[id];
+ port->eth_id = pf->eth_tbl->ports[id].index;
+
+ return 0;
+}
+
+struct nfp_port *
+nfp_port_alloc(struct nfp_app *app, enum nfp_port_type type,
+ struct net_device *netdev)
+{
+ struct nfp_port *port;
+
+ port = kzalloc(sizeof(*port), GFP_KERNEL);
+ if (!port)
+ return ERR_PTR(-ENOMEM);
+
+ port->netdev = netdev;
+ port->type = type;
+ port->app = app;
+
+ list_add_tail(&port->port_list, &app->pf->ports);
+
+ return port;
+}
+
+void nfp_port_free(struct nfp_port *port)
+{
+ if (!port)
+ return;
+ list_del(&port->port_list);
+ kfree(port);
+}
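
The expected alloc/init/free pairing for a physical port looks roughly as below (hypothetical caller; since nfp_port_alloc()/nfp_port_free() manipulate the pf's port list, this presumably runs under pf->lock):

static int ex_make_phys_port(struct nfp_pf *pf, struct nfp_app *app,
			     struct net_device *netdev, unsigned int id)
{
	struct nfp_port *port;
	int err;

	port = nfp_port_alloc(app, NFP_PORT_PHYS_PORT, netdev);
	if (IS_ERR(port))
		return PTR_ERR(port);

	err = nfp_port_init_phy_port(pf, app, port, id);
	if (err) {
		nfp_port_free(port);
		return err;
	}

	return 0;
}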
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_port.h b/drivers/net/ethernet/netronome/nfp/nfp_port.h
new file mode 100644
index 000000000000..a33d22e18f94
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfp_port.h
@@ -0,0 +1,199 @@
+/*
+ * Copyright (C) 2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _NFP_PORT_H_
+#define _NFP_PORT_H_
+
+#include <net/devlink.h>
+
+struct tc_to_netdev;
+struct net_device;
+struct nfp_app;
+struct nfp_pf;
+struct nfp_port;
+
+/**
+ * enum nfp_port_type - type of port NFP can switch traffic to
+ * @NFP_PORT_INVALID: port is invalid, %NFP_PORT_PHYS_PORT transitions to this
+ * state when port disappears because of FW fault or config
+ * change
+ * @NFP_PORT_PHYS_PORT: external NIC port
+ * @NFP_PORT_PF_PORT: logical port of PCI PF
+ * @NFP_PORT_VF_PORT: logical port of PCI VF
+ */
+enum nfp_port_type {
+ NFP_PORT_INVALID,
+ NFP_PORT_PHYS_PORT,
+ NFP_PORT_PF_PORT,
+ NFP_PORT_VF_PORT,
+};
+
+/**
+ * enum nfp_port_flags - port flags (can be type-specific)
+ * @NFP_PORT_CHANGED: port state has changed since last eth table refresh;
+ * for NFP_PORT_PHYS_PORT, never set otherwise; must hold
+ * rtnl_lock to clear
+ */
+enum nfp_port_flags {
+ NFP_PORT_CHANGED = 0,
+};
+
+/**
+ * struct nfp_port - structure representing NFP port
+ * @netdev: backpointer to associated netdev
+ * @type: what port type does the entity represent
+ * @flags: port flags
+ * @app: backpointer to the app structure
+ * @dl_port: devlink port structure
+ * @eth_id: for %NFP_PORT_PHYS_PORT port ID in NFP enumeration scheme
+ * @eth_port: for %NFP_PORT_PHYS_PORT translated ETH Table port entry
+ * @pf_id: for %NFP_PORT_PF_PORT, %NFP_PORT_VF_PORT ID of the PCI PF (0-3)
+ * @vf_id: for %NFP_PORT_VF_PORT ID of the PCI VF within @pf_id
+ * @port_list: entry on pf's list of ports
+ */
+struct nfp_port {
+ struct net_device *netdev;
+ enum nfp_port_type type;
+
+ unsigned long flags;
+
+ struct nfp_app *app;
+
+ struct devlink_port dl_port;
+
+ union {
+ /* NFP_PORT_PHYS_PORT */
+ struct {
+ unsigned int eth_id;
+ struct nfp_eth_table_port *eth_port;
+ };
+ /* NFP_PORT_PF_PORT, NFP_PORT_VF_PORT */
+ struct {
+ unsigned int pf_id;
+ unsigned int vf_id;
+ };
+ };
+
+ struct list_head port_list;
+};
+
+extern const struct switchdev_ops nfp_port_switchdev_ops;
+
+int nfp_port_setup_tc(struct net_device *netdev, u32 handle, u32 chain_index,
+ __be16 proto, struct tc_to_netdev *tc);
+
+struct nfp_port *nfp_port_from_netdev(struct net_device *netdev);
+struct nfp_port *
+nfp_port_from_id(struct nfp_pf *pf, enum nfp_port_type type, unsigned int id);
+struct nfp_eth_table_port *__nfp_port_get_eth_port(struct nfp_port *port);
+struct nfp_eth_table_port *nfp_port_get_eth_port(struct nfp_port *port);
+
+int
+nfp_port_get_phys_port_name(struct net_device *netdev, char *name, size_t len);
+
+struct nfp_port *
+nfp_port_alloc(struct nfp_app *app, enum nfp_port_type type,
+ struct net_device *netdev);
+void nfp_port_free(struct nfp_port *port);
+
+int nfp_port_init_phy_port(struct nfp_pf *pf, struct nfp_app *app,
+ struct nfp_port *port, unsigned int id);
+
+int nfp_net_refresh_eth_port(struct nfp_port *port);
+void nfp_net_refresh_port_table(struct nfp_port *port);
+int nfp_net_refresh_port_table_sync(struct nfp_pf *pf);
+
+int nfp_devlink_port_register(struct nfp_app *app, struct nfp_port *port);
+void nfp_devlink_port_unregister(struct nfp_port *port);
+
+/* MAC stats (0x0000 - 0x0200);
+ * all counters are 64-bit.
+ */
+#define NFP_MAC_STATS_BASE 0x0000
+#define NFP_MAC_STATS_SIZE 0x0200
+
+#define NFP_MAC_STATS_RX_IN_OCTETS (NFP_MAC_STATS_BASE + 0x000)
+#define NFP_MAC_STATS_RX_FRAME_TOO_LONG_ERRORS (NFP_MAC_STATS_BASE + 0x010)
+#define NFP_MAC_STATS_RX_RANGE_LENGTH_ERRORS (NFP_MAC_STATS_BASE + 0x018)
+#define NFP_MAC_STATS_RX_VLAN_REVEIVE_OK (NFP_MAC_STATS_BASE + 0x020)
+#define NFP_MAC_STATS_RX_IN_ERRORS (NFP_MAC_STATS_BASE + 0x028)
+#define NFP_MAC_STATS_RX_IN_BROADCAST_PKTS (NFP_MAC_STATS_BASE + 0x030)
+#define NFP_MAC_STATS_RX_STATS_DROP_EVENTS (NFP_MAC_STATS_BASE + 0x038)
+#define NFP_MAC_STATS_RX_ALIGNMENT_ERRORS (NFP_MAC_STATS_BASE + 0x040)
+#define NFP_MAC_STATS_RX_PAUSE_MAC_CTRL_FRAMES (NFP_MAC_STATS_BASE + 0x048)
+#define NFP_MAC_STATS_RX_FRAMES_RECEIVED_OK (NFP_MAC_STATS_BASE + 0x050)
+#define NFP_MAC_STATS_RX_FRAME_CHECK_SEQUENCE_ERRORS (NFP_MAC_STATS_BASE + 0x058)
+#define NFP_MAC_STATS_RX_UNICAST_PKTS (NFP_MAC_STATS_BASE + 0x060)
+#define NFP_MAC_STATS_RX_MULTICAST_PKTS (NFP_MAC_STATS_BASE + 0x068)
+#define NFP_MAC_STATS_RX_STATS_PKTS (NFP_MAC_STATS_BASE + 0x070)
+#define NFP_MAC_STATS_RX_STATS_UNDERSIZE_PKTS (NFP_MAC_STATS_BASE + 0x078)
+#define NFP_MAC_STATS_RX_STATS_PKTS_64_OCTETS (NFP_MAC_STATS_BASE + 0x080)
+#define NFP_MAC_STATS_RX_STATS_PKTS_65_TO_127_OCTETS (NFP_MAC_STATS_BASE + 0x088)
+#define NFP_MAC_STATS_RX_STATS_PKTS_512_TO_1023_OCTETS (NFP_MAC_STATS_BASE + 0x090)
+#define NFP_MAC_STATS_RX_STATS_PKTS_1024_TO_1518_OCTETS (NFP_MAC_STATS_BASE + 0x098)
+#define NFP_MAC_STATS_RX_STATS_JABBERS (NFP_MAC_STATS_BASE + 0x0a0)
+#define NFP_MAC_STATS_RX_STATS_FRAGMENTS (NFP_MAC_STATS_BASE + 0x0a8)
+#define NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS2 (NFP_MAC_STATS_BASE + 0x0b0)
+#define NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS3 (NFP_MAC_STATS_BASE + 0x0b8)
+#define NFP_MAC_STATS_RX_STATS_PKTS_128_TO_255_OCTETS (NFP_MAC_STATS_BASE + 0x0c0)
+#define NFP_MAC_STATS_RX_STATS_PKTS_256_TO_511_OCTETS (NFP_MAC_STATS_BASE + 0x0c8)
+#define NFP_MAC_STATS_RX_STATS_PKTS_1519_TO_MAX_OCTETS (NFP_MAC_STATS_BASE + 0x0d0)
+#define NFP_MAC_STATS_RX_OVERSIZE_PKTS (NFP_MAC_STATS_BASE + 0x0d8)
+#define NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS0 (NFP_MAC_STATS_BASE + 0x0e0)
+#define NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS1 (NFP_MAC_STATS_BASE + 0x0e8)
+#define NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS4 (NFP_MAC_STATS_BASE + 0x0f0)
+#define NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS5 (NFP_MAC_STATS_BASE + 0x0f8)
+#define NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS6 (NFP_MAC_STATS_BASE + 0x100)
+#define NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS7 (NFP_MAC_STATS_BASE + 0x108)
+#define NFP_MAC_STATS_RX_MAC_CTRL_FRAMES_RECEIVED (NFP_MAC_STATS_BASE + 0x110)
+#define NFP_MAC_STATS_RX_MAC_HEAD_DROP (NFP_MAC_STATS_BASE + 0x118)
+
+#define NFP_MAC_STATS_TX_QUEUE_DROP (NFP_MAC_STATS_BASE + 0x138)
+#define NFP_MAC_STATS_TX_OUT_OCTETS (NFP_MAC_STATS_BASE + 0x140)
+#define NFP_MAC_STATS_TX_VLAN_TRANSMITTED_OK (NFP_MAC_STATS_BASE + 0x150)
+#define NFP_MAC_STATS_TX_OUT_ERRORS (NFP_MAC_STATS_BASE + 0x158)
+#define NFP_MAC_STATS_TX_BROADCAST_PKTS (NFP_MAC_STATS_BASE + 0x160)
+#define NFP_MAC_STATS_TX_PKTS_64_OCTETS (NFP_MAC_STATS_BASE + 0x168)
+#define NFP_MAC_STATS_TX_PKTS_256_TO_511_OCTETS (NFP_MAC_STATS_BASE + 0x170)
+#define NFP_MAC_STATS_TX_PKTS_512_TO_1023_OCTETS (NFP_MAC_STATS_BASE + 0x178)
+#define NFP_MAC_STATS_TX_PAUSE_MAC_CTRL_FRAMES (NFP_MAC_STATS_BASE + 0x180)
+#define NFP_MAC_STATS_TX_FRAMES_TRANSMITTED_OK (NFP_MAC_STATS_BASE + 0x188)
+#define NFP_MAC_STATS_TX_UNICAST_PKTS (NFP_MAC_STATS_BASE + 0x190)
+#define NFP_MAC_STATS_TX_MULTICAST_PKTS (NFP_MAC_STATS_BASE + 0x198)
+#define NFP_MAC_STATS_TX_PKTS_65_TO_127_OCTETS (NFP_MAC_STATS_BASE + 0x1a0)
+#define NFP_MAC_STATS_TX_PKTS_127_TO_512_OCTETS (NFP_MAC_STATS_BASE + 0x1a8)
+#define NFP_MAC_STATS_TX_PKTS_128_TO_1518_OCTETS (NFP_MAC_STATS_BASE + 0x1b0)
+#define NFP_MAC_STATS_TX_PKTS_1518_TO_MAX_OCTETS (NFP_MAC_STATS_BASE + 0x1b8)
+
+#endif
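
Each counter in the window above is a 64-bit memory-mapped register read with readq(); io-64-nonatomic-hi-lo.h supplies readq() on 32-bit builds, as used in nfp_net_repr.c earlier in this patch. A minimal illustrative read, assuming the caller has already mapped the per-port stats window:

#include <linux/io-64-nonatomic-hi-lo.h>

static u64 ex_rx_ok_frames(u8 __iomem *port_stats)
{
	return readq(port_stats + NFP_MAC_STATS_RX_FRAMES_RECEIVED_OK);
}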
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h
index 4df2ce261b3f..1a8d04a1e113 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h
@@ -46,7 +46,9 @@
/* Implemented in nfp_hwinfo.c */
-const char *nfp_hwinfo_lookup(struct nfp_cpp *cpp, const char *lookup);
+struct nfp_hwinfo;
+struct nfp_hwinfo *nfp_hwinfo_read(struct nfp_cpp *cpp);
+const char *nfp_hwinfo_lookup(struct nfp_hwinfo *hwinfo, const char *lookup);
/* Implemented in nfp_nsp.c, low level functions */
@@ -64,6 +66,8 @@ int nfp_nsp_read_eth_table(struct nfp_nsp *state, void *buf, unsigned int size);
int nfp_nsp_write_eth_table(struct nfp_nsp *state,
const void *buf, unsigned int size);
int nfp_nsp_read_identify(struct nfp_nsp *state, void *buf, unsigned int size);
+int nfp_nsp_read_sensors(struct nfp_nsp *state, unsigned int sensor_mask,
+ void *buf, unsigned int size);
/* Implemented in nfp_resource.c */
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
index 43dc68e01274..cd678323bacb 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
@@ -119,6 +119,11 @@
#define NFP_PCIE_EM 0x020000
#define NFP_PCIE_SRAM 0x000000
+/* Minimal size of the PCIe cfg memory we depend on being mapped;
+ * the queue controller and DMA controller don't have to be covered.
+ */
+#define NFP_PCI_MIN_MAP_SIZE 0x080000
+
#define NFP_PCIE_P2C_FIXED_SIZE(bar) (1 << (bar)->bitsize)
#define NFP_PCIE_P2C_BULK_SIZE(bar) (1 << (bar)->bitsize)
#define NFP_PCIE_P2C_GENERAL_TARGET_OFFSET(bar, x) ((x) << ((bar)->bitsize - 2))
@@ -583,9 +588,15 @@ static int enable_bars(struct nfp6000_pcie *nfp, u16 interface)
NFP_PCIE_BAR_PCIE2CPP_MapType(
NFP_PCIE_BAR_PCIE2CPP_MapType_EXPLICIT3),
};
+ char status_msg[196] = {};
struct nfp_bar *bar;
int i, bars_free;
int expl_groups;
+ char *msg, *end;
+
+ msg = status_msg +
+ snprintf(status_msg, sizeof(status_msg) - 1, "RESERVED BARs: ");
+ end = status_msg + sizeof(status_msg) - 1;
bar = &nfp->bar[0];
for (i = 0; i < ARRAY_SIZE(nfp->bar); i++, bar++) {
@@ -628,34 +639,38 @@ static int enable_bars(struct nfp6000_pcie *nfp, u16 interface)
/* Configure, and lock, BAR0.0 for General Target use (MSI-X SRAM) */
bar = &nfp->bar[0];
- bar->iomem = ioremap_nocache(nfp_bar_resource_start(bar),
- nfp_bar_resource_len(bar));
+ if (nfp_bar_resource_len(bar) >= NFP_PCI_MIN_MAP_SIZE)
+ bar->iomem = ioremap_nocache(nfp_bar_resource_start(bar),
+ nfp_bar_resource_len(bar));
if (bar->iomem) {
- dev_info(nfp->dev,
- "BAR0.0 RESERVED: General Mapping/MSI-X SRAM\n");
+ msg += snprintf(msg, end - msg, "0.0: General/MSI-X SRAM, ");
atomic_inc(&bar->refcnt);
bars_free--;
nfp6000_bar_write(nfp, bar, barcfg_msix_general);
nfp->expl.data = bar->iomem + NFP_PCIE_SRAM + 0x1000;
+
+ if (nfp->pdev->device == PCI_DEVICE_ID_NETRONOME_NFP4000 ||
+ nfp->pdev->device == PCI_DEVICE_ID_NETRONOME_NFP6000) {
+ nfp->iomem.csr = bar->iomem + NFP_PCIE_BAR(0);
+ } else {
+ int pf = nfp->pdev->devfn & 7;
+
+ nfp->iomem.csr = bar->iomem + NFP_PCIE_BAR(pf);
+ }
+ nfp->iomem.em = bar->iomem + NFP_PCIE_EM;
}
if (nfp->pdev->device == PCI_DEVICE_ID_NETRONOME_NFP4000 ||
- nfp->pdev->device == PCI_DEVICE_ID_NETRONOME_NFP6000) {
- nfp->iomem.csr = bar->iomem + NFP_PCIE_BAR(0);
+ nfp->pdev->device == PCI_DEVICE_ID_NETRONOME_NFP6000)
expl_groups = 4;
- } else {
- int pf = nfp->pdev->devfn & 7;
-
- nfp->iomem.csr = bar->iomem + NFP_PCIE_BAR(pf);
+ else
expl_groups = 1;
- }
- nfp->iomem.em = bar->iomem + NFP_PCIE_EM;
/* Configure, and lock, BAR0.1 for PCIe XPB (MSI-X PBA) */
bar = &nfp->bar[1];
- dev_info(nfp->dev, "BAR0.1 RESERVED: PCIe XPB/MSI-X PBA\n");
+ msg += snprintf(msg, end - msg, "0.1: PCIe XPB/MSI-X PBA, ");
atomic_inc(&bar->refcnt);
bars_free--;
@@ -674,9 +689,8 @@ static int enable_bars(struct nfp6000_pcie *nfp, u16 interface)
bar->iomem = ioremap_nocache(nfp_bar_resource_start(bar),
nfp_bar_resource_len(bar));
if (bar->iomem) {
- dev_info(nfp->dev,
- "BAR0.%d RESERVED: Explicit%d Mapping\n",
- 4 + i, i);
+ msg += snprintf(msg, end - msg,
+ "0.%d: Explicit%d, ", 4 + i, i);
atomic_inc(&bar->refcnt);
bars_free--;
@@ -694,8 +708,7 @@ static int enable_bars(struct nfp6000_pcie *nfp, u16 interface)
sort(&nfp->bar[0], nfp->bars, sizeof(nfp->bar[0]),
bar_cmp, NULL);
- dev_info(nfp->dev, "%d NFP PCI2CPP BARs, %d free\n",
- nfp->bars, bars_free);
+ dev_info(nfp->dev, "%sfree: %d/%d\n", status_msg, bars_free, nfp->bars);
return 0;
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h
index edecc0a27485..5798adc57cbc 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h
@@ -42,6 +42,7 @@
#include <linux/ctype.h>
#include <linux/types.h>
+#include <linux/sizes.h>
#ifndef NFP_SUBSYS
#define NFP_SUBSYS "nfp"
@@ -59,6 +60,13 @@
#define PCI_64BIT_BAR_COUNT 3
#define NFP_CPP_NUM_TARGETS 16
+/* Max size of area it should be safe to request */
+#define NFP_CPP_SAFE_AREA_SIZE SZ_2M
+
+/* NFP_MUTEX_WAIT_* are timeouts in seconds when waiting for a mutex */
+#define NFP_MUTEX_WAIT_FIRST_WARN 15
+#define NFP_MUTEX_WAIT_NEXT_WARN 5
+#define NFP_MUTEX_WAIT_ERROR 60
struct device;
@@ -214,13 +222,6 @@ u32 nfp_cpp_model(struct nfp_cpp *cpp);
u16 nfp_cpp_interface(struct nfp_cpp *cpp);
int nfp_cpp_serial(struct nfp_cpp *cpp, const u8 **serial);
-void *nfp_hwinfo_cache(struct nfp_cpp *cpp);
-void nfp_hwinfo_cache_set(struct nfp_cpp *cpp, void *val);
-void *nfp_rtsym_cache(struct nfp_cpp *cpp);
-void nfp_rtsym_cache_set(struct nfp_cpp *cpp, void *val);
-
-void nfp_nffw_cache_flush(struct nfp_cpp *cpp);
-
struct nfp_cpp_area *nfp_cpp_area_alloc_with_name(struct nfp_cpp *cpp,
u32 cpp_id,
const char *name,
@@ -229,6 +230,9 @@ struct nfp_cpp_area *nfp_cpp_area_alloc_with_name(struct nfp_cpp *cpp,
struct nfp_cpp_area *nfp_cpp_area_alloc(struct nfp_cpp *cpp, u32 cpp_id,
unsigned long long address,
unsigned long size);
+struct nfp_cpp_area *
+nfp_cpp_area_alloc_acquire(struct nfp_cpp *cpp, const char *name, u32 cpp_id,
+ unsigned long long address, unsigned long size);
void nfp_cpp_area_free(struct nfp_cpp_area *area);
int nfp_cpp_area_acquire(struct nfp_cpp_area *area);
int nfp_cpp_area_acquire_nonblocking(struct nfp_cpp_area *area);
@@ -238,8 +242,6 @@ int nfp_cpp_area_read(struct nfp_cpp_area *area, unsigned long offset,
void *buffer, size_t length);
int nfp_cpp_area_write(struct nfp_cpp_area *area, unsigned long offset,
const void *buffer, size_t length);
-int nfp_cpp_area_check_range(struct nfp_cpp_area *area,
- unsigned long long offset, unsigned long size);
const char *nfp_cpp_area_name(struct nfp_cpp_area *cpp_area);
void *nfp_cpp_area_priv(struct nfp_cpp_area *cpp_area);
struct nfp_cpp *nfp_cpp_area_cpp(struct nfp_cpp_area *cpp_area);
@@ -277,6 +279,10 @@ int nfp_cpp_readq(struct nfp_cpp *cpp, u32 cpp_id,
int nfp_cpp_writeq(struct nfp_cpp *cpp, u32 cpp_id,
unsigned long long address, u64 value);
+u8 __iomem *
+nfp_cpp_map_area(struct nfp_cpp *cpp, const char *name, int domain, int target,
+ u64 addr, unsigned long size, struct nfp_cpp_area **area);
+
struct nfp_cpp_mutex;
int nfp_cpp_mutex_init(struct nfp_cpp *cpp, int target,
@@ -289,6 +295,17 @@ int nfp_cpp_mutex_lock(struct nfp_cpp_mutex *mutex);
int nfp_cpp_mutex_unlock(struct nfp_cpp_mutex *mutex);
int nfp_cpp_mutex_trylock(struct nfp_cpp_mutex *mutex);
+/**
+ * nfp_cppcore_pcie_unit() - Get PCI Unit of a CPP handle
+ * @cpp: CPP handle
+ *
+ * Return: PCI unit for the NFP CPP handle
+ */
+static inline u8 nfp_cppcore_pcie_unit(struct nfp_cpp *cpp)
+{
+ return NFP_CPP_INTERFACE_UNIT_of(nfp_cpp_interface(cpp));
+}
+
struct nfp_cpp_explicit;
struct nfp_cpp_explicit_command {
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
index e2abba4c3a3f..04dd5758ecf5 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
@@ -76,10 +76,6 @@ struct nfp_cpp_resource {
* @serial: chip serial number
* @imb_cat_table: CPP Mapping Table
*
- * Following fields can be used only in probe() or with rtnl held:
- * @hwinfo: HWInfo database fetched from the device
- * @rtsym: firmware run time symbols
- *
* Following fields use explicit locking:
* @resource_list: NFP CPP resource list
* @resource_lock: protects @resource_list
@@ -107,9 +103,6 @@ struct nfp_cpp {
struct mutex area_cache_mutex;
struct list_head area_cache_list;
-
- void *hwinfo;
- void *rtsym;
};
/* Element of the area_cache_list */
@@ -233,9 +226,6 @@ void nfp_cpp_free(struct nfp_cpp *cpp)
if (cpp->op->free)
cpp->op->free(cpp);
- kfree(cpp->hwinfo);
- kfree(cpp->rtsym);
-
device_unregister(&cpp->dev);
kfree(cpp);
@@ -276,39 +266,6 @@ int nfp_cpp_serial(struct nfp_cpp *cpp, const u8 **serial)
return sizeof(cpp->serial);
}
-void *nfp_hwinfo_cache(struct nfp_cpp *cpp)
-{
- return cpp->hwinfo;
-}
-
-void nfp_hwinfo_cache_set(struct nfp_cpp *cpp, void *val)
-{
- cpp->hwinfo = val;
-}
-
-void *nfp_rtsym_cache(struct nfp_cpp *cpp)
-{
- return cpp->rtsym;
-}
-
-void nfp_rtsym_cache_set(struct nfp_cpp *cpp, void *val)
-{
- cpp->rtsym = val;
-}
-
-/**
- * nfp_nffw_cache_flush() - Flush cached firmware information
- * @cpp: NFP CPP handle
- *
- * Flush cached firmware information. This function should be called
- * every time firmware is loaded on unloaded.
- */
-void nfp_nffw_cache_flush(struct nfp_cpp *cpp)
-{
- kfree(nfp_rtsym_cache(cpp));
- nfp_rtsym_cache_set(cpp, NULL);
-}
-
/**
* nfp_cpp_area_alloc_with_name() - allocate a new CPP area
* @cpp: CPP device handle
@@ -404,6 +361,41 @@ nfp_cpp_area_alloc(struct nfp_cpp *cpp, u32 dest,
}
/**
+ * nfp_cpp_area_alloc_acquire() - allocate a new CPP area and lock it down
+ * @cpp: CPP handle
+ * @name: Name of region
+ * @dest: CPP id
+ * @address: Start address on CPP target
+ * @size: Size of area
+ *
+ * Allocate and initialize a CPP area structure, and lock it down so
+ * that it can be accessed directly.
+ *
+ * NOTE: @address and @size must be 32-bit aligned values.
+ *
+ * NOTE: The area must also be 'released' when the structure is freed.
+ *
+ * Return: NFP CPP Area handle, or NULL
+ */
+struct nfp_cpp_area *
+nfp_cpp_area_alloc_acquire(struct nfp_cpp *cpp, const char *name, u32 dest,
+ unsigned long long address, unsigned long size)
+{
+ struct nfp_cpp_area *area;
+
+ area = nfp_cpp_area_alloc_with_name(cpp, dest, name, address, size);
+ if (!area)
+ return NULL;
+
+ if (nfp_cpp_area_acquire(area)) {
+ nfp_cpp_area_free(area);
+ return NULL;
+ }
+
+ return area;
+}
+
+/**
* nfp_cpp_area_free() - free up the CPP area
* @area: CPP area handle
*
@@ -579,27 +571,6 @@ int nfp_cpp_area_write(struct nfp_cpp_area *area,
}
/**
- * nfp_cpp_area_check_range() - check if address range fits in CPP area
- * @area: CPP area handle
- * @offset: offset into CPP target
- * @length: size of address range in bytes
- *
- * Check if address range fits within CPP area. Return 0 if area
- * fits or -EFAULT on error.
- *
- * Return: 0, or -ERRNO
- */
-int nfp_cpp_area_check_range(struct nfp_cpp_area *area,
- unsigned long long offset, unsigned long length)
-{
- if (offset < area->offset ||
- offset + length > area->offset + area->size)
- return -EFAULT;
-
- return 0;
-}
-
-/**
* nfp_cpp_area_name() - return name of a CPP area
* @cpp_area: CPP area handle
*
@@ -924,18 +895,9 @@ area_cache_put(struct nfp_cpp *cpp, struct nfp_cpp_area_cache *cache)
mutex_unlock(&cpp->area_cache_mutex);
}
-/**
- * nfp_cpp_read() - read from CPP target
- * @cpp: CPP handle
- * @destination: CPP id
- * @address: offset into CPP target
- * @kernel_vaddr: kernel buffer for result
- * @length: number of bytes to read
- *
- * Return: length of io, or -ERRNO
- */
-int nfp_cpp_read(struct nfp_cpp *cpp, u32 destination,
- unsigned long long address, void *kernel_vaddr, size_t length)
+static int __nfp_cpp_read(struct nfp_cpp *cpp, u32 destination,
+ unsigned long long address, void *kernel_vaddr,
+ size_t length)
{
struct nfp_cpp_area_cache *cache;
struct nfp_cpp_area *area;
@@ -968,18 +930,43 @@ int nfp_cpp_read(struct nfp_cpp *cpp, u32 destination,
}
/**
- * nfp_cpp_write() - write to CPP target
+ * nfp_cpp_read() - read from CPP target
* @cpp: CPP handle
* @destination: CPP id
* @address: offset into CPP target
- * @kernel_vaddr: kernel buffer to read from
- * @length: number of bytes to write
+ * @kernel_vaddr: kernel buffer for result
+ * @length: number of bytes to read
*
* Return: length of io, or -ERRNO
*/
-int nfp_cpp_write(struct nfp_cpp *cpp, u32 destination,
- unsigned long long address,
- const void *kernel_vaddr, size_t length)
+int nfp_cpp_read(struct nfp_cpp *cpp, u32 destination,
+ unsigned long long address, void *kernel_vaddr,
+ size_t length)
+{
+ size_t n, offset;
+ int ret;
+
+ for (offset = 0; offset < length; offset += n) {
+ unsigned long long r_addr = address + offset;
+
+ /* make first read smaller to align to safe window */
+ n = min_t(size_t, length - offset,
+ ALIGN(r_addr + 1, NFP_CPP_SAFE_AREA_SIZE) - r_addr);
+
+ ret = __nfp_cpp_read(cpp, destination, address + offset,
+ kernel_vaddr + offset, n);
+ if (ret < 0)
+ return ret;
+ if (ret != n)
+ return offset + n;
+ }
+
+ return length;
+}
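
The loop never lets one access straddle an NFP_CPP_SAFE_AREA_SIZE (2 MB) boundary: the first chunk is shortened so that subsequent chunks start aligned. For example, a 0x300000-byte read at address 0x1ffff0 is issued as chunks of 0x10, 0x200000 and 0xffff0 bytes. The split logic in isolation (hypothetical helper):

static size_t ex_chunk_len(unsigned long long addr, size_t remaining)
{
	/* Length up to the next safe-window boundary, capped by remaining */
	return min_t(size_t, remaining,
		     ALIGN(addr + 1, NFP_CPP_SAFE_AREA_SIZE) - addr);
}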
+
+static int __nfp_cpp_write(struct nfp_cpp *cpp, u32 destination,
+ unsigned long long address,
+ const void *kernel_vaddr, size_t length)
{
struct nfp_cpp_area_cache *cache;
struct nfp_cpp_area *area;
@@ -1011,6 +998,41 @@ int nfp_cpp_write(struct nfp_cpp *cpp, u32 destination,
return err;
}
+/**
+ * nfp_cpp_write() - write to CPP target
+ * @cpp: CPP handle
+ * @destination: CPP id
+ * @address: offset into CPP target
+ * @kernel_vaddr: kernel buffer to read from
+ * @length: number of bytes to write
+ *
+ * Return: length of io, or -ERRNO
+ */
+int nfp_cpp_write(struct nfp_cpp *cpp, u32 destination,
+ unsigned long long address,
+ const void *kernel_vaddr, size_t length)
+{
+ size_t n, offset;
+ int ret;
+
+ for (offset = 0; offset < length; offset += n) {
+ unsigned long long w_addr = address + offset;
+
+ /* make first write smaller to align to safe window */
+ n = min_t(size_t, length - offset,
+ ALIGN(w_addr + 1, NFP_CPP_SAFE_AREA_SIZE) - w_addr);
+
+ ret = __nfp_cpp_write(cpp, destination, address + offset,
+ kernel_vaddr + offset, n);
+ if (ret < 0)
+ return ret;
+ if (ret != n)
+ return offset + n;
+ }
+
+ return length;
+}
+
/* Return the correct CPP address, and fixup xpb_addr as needed. */
static u32 nfp_xpb_to_cpp(struct nfp_cpp *cpp, u32 *xpb_addr)
{
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c
index 0ba0379b8f75..ab86bceb93f2 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c
@@ -279,3 +279,43 @@ exit_release:
return err;
}
+
+/**
+ * nfp_cpp_map_area() - Helper function to map an area
+ * @cpp: NFP CPP handler
+ * @name: Name for the area
+ * @domain: CPP domain
+ * @target: CPP target
+ * @addr: CPP address
+ * @size: Size of the area
+ * @area: Area handle (output)
+ *
+ * Map an area for IOMEM access. To undo the effect of this function, call
+ * nfp_cpp_area_release_free(*area).
+ *
+ * Return: Pointer to memory mapped area or ERR_PTR
+ */
+u8 __iomem *
+nfp_cpp_map_area(struct nfp_cpp *cpp, const char *name, int domain, int target,
+ u64 addr, unsigned long size, struct nfp_cpp_area **area)
+{
+ u8 __iomem *res;
+ u32 dest;
+
+ dest = NFP_CPP_ISLAND_ID(target, NFP_CPP_ACTION_RW, 0, domain);
+
+ *area = nfp_cpp_area_alloc_acquire(cpp, name, dest, addr, size);
+ if (!*area)
+ goto err_eio;
+
+ res = nfp_cpp_area_iomem(*area);
+ if (!res)
+ goto err_release_free;
+
+ return res;
+
+err_release_free:
+ nfp_cpp_area_release_free(*area);
+err_eio:
+ return (u8 __iomem *)ERR_PTR(-EIO);
+}
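
The usual pairing is map, access through the returned pointer, then nfp_cpp_area_release_free() on the area handle. A hypothetical caller, for illustration:

static int ex_peek_word(struct nfp_cpp *cpp, int domain, int target,
			u64 addr, u32 *val)
{
	struct nfp_cpp_area *area;
	u8 __iomem *mem;

	mem = nfp_cpp_map_area(cpp, "example", domain, target,
			       addr, sizeof(*val), &area);
	if (IS_ERR(mem))
		return PTR_ERR(mem);

	*val = readl(mem);
	nfp_cpp_area_release_free(area);

	return 0;
}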
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_hwinfo.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_hwinfo.c
index 8d8f311ffa6e..4f24aff1e772 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_hwinfo.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_hwinfo.c
@@ -178,7 +178,8 @@ hwinfo_db_validate(struct nfp_cpp *cpp, struct nfp_hwinfo *db, u32 len)
return hwinfo_db_walk(cpp, db, size);
}
-static int hwinfo_try_fetch(struct nfp_cpp *cpp, size_t *cpp_size)
+static struct nfp_hwinfo *
+hwinfo_try_fetch(struct nfp_cpp *cpp, size_t *cpp_size)
{
struct nfp_hwinfo *header;
struct nfp_resource *res;
@@ -196,7 +197,7 @@ static int hwinfo_try_fetch(struct nfp_cpp *cpp, size_t *cpp_size)
nfp_resource_release(res);
if (*cpp_size < HWINFO_SIZE_MIN)
- return -ENOENT;
+ return NULL;
} else if (PTR_ERR(res) == -ENOENT) {
/* Try getting the HWInfo table from the 'classic' location */
cpp_id = NFP_CPP_ISLAND_ID(NFP_CPP_TARGET_MU,
@@ -204,101 +205,86 @@ static int hwinfo_try_fetch(struct nfp_cpp *cpp, size_t *cpp_size)
cpp_addr = 0x30000;
*cpp_size = 0x0e000;
} else {
- return PTR_ERR(res);
+ return NULL;
}
db = kmalloc(*cpp_size + 1, GFP_KERNEL);
if (!db)
- return -ENOMEM;
+ return NULL;
err = nfp_cpp_read(cpp, cpp_id, cpp_addr, db, *cpp_size);
- if (err != *cpp_size) {
- kfree(db);
- return err < 0 ? err : -EIO;
- }
+ if (err != *cpp_size)
+ goto exit_free;
header = (void *)db;
- if (nfp_hwinfo_is_updating(header)) {
- kfree(db);
- return -EBUSY;
- }
+ if (nfp_hwinfo_is_updating(header))
+ goto exit_free;
if (le32_to_cpu(header->version) != NFP_HWINFO_VERSION_2) {
nfp_err(cpp, "Unknown HWInfo version: 0x%08x\n",
le32_to_cpu(header->version));
- kfree(db);
- return -EINVAL;
+ goto exit_free;
}
/* NULL-terminate for safety */
db[*cpp_size] = '\0';
- nfp_hwinfo_cache_set(cpp, db);
-
- return 0;
+ return (void *)db;
+exit_free:
+ kfree(db);
+ return NULL;
}
-static int hwinfo_fetch(struct nfp_cpp *cpp, size_t *hwdb_size)
+static struct nfp_hwinfo *hwinfo_fetch(struct nfp_cpp *cpp, size_t *hwdb_size)
{
const unsigned long wait_until = jiffies + HWINFO_WAIT * HZ;
+ struct nfp_hwinfo *db;
int err;
for (;;) {
const unsigned long start_time = jiffies;
- err = hwinfo_try_fetch(cpp, hwdb_size);
- if (!err)
- return 0;
+ db = hwinfo_try_fetch(cpp, hwdb_size);
+ if (db)
+ return db;
err = msleep_interruptible(100);
if (err || time_after(start_time, wait_until)) {
nfp_err(cpp, "NFP access error\n");
- return -EIO;
+ return NULL;
}
}
}
-static int nfp_hwinfo_load(struct nfp_cpp *cpp)
+struct nfp_hwinfo *nfp_hwinfo_read(struct nfp_cpp *cpp)
{
struct nfp_hwinfo *db;
size_t hwdb_size = 0;
int err;
- err = hwinfo_fetch(cpp, &hwdb_size);
- if (err)
- return err;
+ db = hwinfo_fetch(cpp, &hwdb_size);
+ if (!db)
+ return NULL;
- db = nfp_hwinfo_cache(cpp);
err = hwinfo_db_validate(cpp, db, hwdb_size);
if (err) {
kfree(db);
- nfp_hwinfo_cache_set(cpp, NULL);
- return err;
+ return NULL;
}
- return 0;
+ return db;
}
/**
* nfp_hwinfo_lookup() - Find a value in the HWInfo table by name
- * @cpp: NFP CPP handle
+ * @hwinfo: NFP HWinfo table
* @lookup: HWInfo name to search for
*
* Return: Value of the HWInfo name, or NULL
*/
-const char *nfp_hwinfo_lookup(struct nfp_cpp *cpp, const char *lookup)
+const char *nfp_hwinfo_lookup(struct nfp_hwinfo *hwinfo, const char *lookup)
{
const char *key, *val, *end;
- struct nfp_hwinfo *hwinfo;
- int err;
-
- hwinfo = nfp_hwinfo_cache(cpp);
- if (!hwinfo) {
- err = nfp_hwinfo_load(cpp);
- if (err)
- return NULL;
- hwinfo = nfp_hwinfo_cache(cpp);
- }
if (!hwinfo || !lookup)
return NULL;
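
With this change the HWInfo table becomes a caller-owned object: fetch it once with nfp_hwinfo_read(), look up keys, and free it when done (freeing with kfree() matches the error path above). A sketch of the new flow; the lookup key below is illustrative:

static void ex_print_serial(struct nfp_cpp *cpp)
{
	struct nfp_hwinfo *hwinfo;
	const char *val;

	hwinfo = nfp_hwinfo_read(cpp);
	if (!hwinfo)
		return;

	val = nfp_hwinfo_lookup(hwinfo, "assembly.serial");
	if (val)
		nfp_info(cpp, "serial: %s\n", val);

	kfree(hwinfo);
}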
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mip.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mip.c
index 3d15dd03647e..5f193fe2d69e 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mip.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mip.c
@@ -141,6 +141,8 @@ const struct nfp_mip *nfp_mip_open(struct nfp_cpp *cpp)
return NULL;
}
+ mip->name[sizeof(mip->name) - 1] = 0;
+
return mip;
}
@@ -149,6 +151,11 @@ void nfp_mip_close(const struct nfp_mip *mip)
kfree(mip);
}
+const char *nfp_mip_name(const struct nfp_mip *mip)
+{
+ return mip->name;
+}
+
/**
* nfp_mip_symtab() - Get the address and size of the MIP symbol table
* @mip: MIP handle
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c
index 8a99c189efa8..f7b958181126 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c
@@ -195,7 +195,8 @@ void nfp_cpp_mutex_free(struct nfp_cpp_mutex *mutex)
*/
int nfp_cpp_mutex_lock(struct nfp_cpp_mutex *mutex)
{
- unsigned long warn_at = jiffies + 15 * HZ;
+ unsigned long warn_at = jiffies + NFP_MUTEX_WAIT_FIRST_WARN * HZ;
+ unsigned long err_at = jiffies + NFP_MUTEX_WAIT_ERROR * HZ;
unsigned int timeout_ms = 1;
int err;
@@ -214,12 +215,16 @@ int nfp_cpp_mutex_lock(struct nfp_cpp_mutex *mutex)
return -ERESTARTSYS;
if (time_is_before_eq_jiffies(warn_at)) {
- warn_at = jiffies + 60 * HZ;
+ warn_at = jiffies + NFP_MUTEX_WAIT_NEXT_WARN * HZ;
nfp_warn(mutex->cpp,
"Warning: waiting for NFP mutex [depth:%hd target:%d addr:%llx key:%08x]\n",
mutex->depth,
mutex->target, mutex->address, mutex->key);
}
+ if (time_is_before_eq_jiffies(err_at)) {
+ nfp_err(mutex->cpp, "Error: mutex wait timed out\n");
+ return -EBUSY;
+ }
}
return err;
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.h
index 988badd230d1..c9724fb7ea4b 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.h
@@ -55,6 +55,7 @@ struct nfp_mip;
const struct nfp_mip *nfp_mip_open(struct nfp_cpp *cpp);
void nfp_mip_close(const struct nfp_mip *mip);
+const char *nfp_mip_name(const struct nfp_mip *mip);
void nfp_mip_symtab(const struct nfp_mip *mip, u32 *addr, u32 *size);
void nfp_mip_strtab(const struct nfp_mip *mip, u32 *addr, u32 *size);
@@ -87,9 +88,20 @@ struct nfp_rtsym {
int domain;
};
-int nfp_rtsym_count(struct nfp_cpp *cpp);
-const struct nfp_rtsym *nfp_rtsym_get(struct nfp_cpp *cpp, int idx);
-const struct nfp_rtsym *nfp_rtsym_lookup(struct nfp_cpp *cpp, const char *name);
-u64 nfp_rtsym_read_le(struct nfp_cpp *cpp, const char *name, int *error);
+struct nfp_rtsym_table;
+
+struct nfp_rtsym_table *nfp_rtsym_table_read(struct nfp_cpp *cpp);
+struct nfp_rtsym_table *
+__nfp_rtsym_table_read(struct nfp_cpp *cpp, const struct nfp_mip *mip);
+int nfp_rtsym_count(struct nfp_rtsym_table *rtbl);
+const struct nfp_rtsym *nfp_rtsym_get(struct nfp_rtsym_table *rtbl, int idx);
+const struct nfp_rtsym *
+nfp_rtsym_lookup(struct nfp_rtsym_table *rtbl, const char *name);
+
+u64 nfp_rtsym_read_le(struct nfp_rtsym_table *rtbl, const char *name,
+ int *error);
+u8 __iomem *
+nfp_rtsym_map(struct nfp_rtsym_table *rtbl, const char *name, const char *id,
+ unsigned int min_size, struct nfp_cpp_area **area);
#endif /* NFP_NFFW_H */
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
index 2fa9247bb23d..37364555c42b 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
@@ -93,6 +93,7 @@ enum nfp_nsp_cmd {
SPCODE_FW_LOAD = 6, /* Load fw from buffer, len in option */
SPCODE_ETH_RESCAN = 7, /* Rescan ETHs, write ETH_TABLE to buf */
SPCODE_ETH_CONTROL = 8, /* Update media config from buffer */
+ SPCODE_NSP_SENSORS = 12, /* Read NSP sensor(s) */
SPCODE_NSP_IDENTIFY = 13, /* Read NSP version */
};
@@ -419,6 +420,14 @@ static int nfp_nsp_command_buf(struct nfp_nsp *nsp, u16 code, u32 option,
if (err < 0)
return err;
}
+ /* Zero out remaining part of the buffer */
+ if (out_buf && out_size && out_size > in_size) {
+ memset(out_buf, 0, out_size - in_size);
+ err = nfp_cpp_write(cpp, cpp_id, cpp_buf + in_size,
+ out_buf, out_size - in_size);
+ if (err < 0)
+ return err;
+ }
ret = nfp_nsp_command(nsp, code, option, cpp_id, cpp_buf);
if (ret < 0)
@@ -465,13 +474,7 @@ int nfp_nsp_wait(struct nfp_nsp *state)
int nfp_nsp_device_soft_reset(struct nfp_nsp *state)
{
- int err;
-
- err = nfp_nsp_command(state, SPCODE_SOFT_RESET, 0, 0, 0);
-
- nfp_nffw_cache_flush(state->cpp);
-
- return err;
+ return nfp_nsp_command(state, SPCODE_SOFT_RESET, 0, 0, 0);
}
int nfp_nsp_load_fw(struct nfp_nsp *state, const struct firmware *fw)
@@ -498,3 +501,10 @@ int nfp_nsp_read_identify(struct nfp_nsp *state, void *buf, unsigned int size)
return nfp_nsp_command_buf(state, SPCODE_NSP_IDENTIFY, size, NULL, 0,
buf, size);
}
+
+int nfp_nsp_read_sensors(struct nfp_nsp *state, unsigned int sensor_mask,
+ void *buf, unsigned int size)
+{
+ return nfp_nsp_command_buf(state, SPCODE_NSP_SENSORS, sensor_mask,
+ NULL, 0, buf, size);
+}
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
index 36b21e4dc56d..e2f028027c6f 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
@@ -76,6 +76,7 @@ enum nfp_eth_aneg {
/**
* struct nfp_eth_table - ETH table information
* @count: number of table entries
+ * @max_index: max of @index fields of all @ports
* @ports: table of ports
*
* @eth_index: port index according to legacy ethX numbering
@@ -96,10 +97,12 @@ enum nfp_eth_aneg {
* @override_changed: is media reconfig pending?
*
* @port_type: one of %PORT_* defines for ethtool
+ * @port_lanes: total number of lanes on the port (sum of lanes of all subports)
* @is_split: is interface part of a split port
*/
struct nfp_eth_table {
unsigned int count;
+ unsigned int max_index;
struct nfp_eth_table_port {
unsigned int eth_index;
unsigned int index;
@@ -127,6 +130,8 @@ struct nfp_eth_table {
/* Computed fields */
u8 port_type;
+ unsigned int port_lanes;
+
bool is_split;
} ports[0];
};
@@ -157,6 +162,7 @@ int __nfp_eth_set_split(struct nfp_nsp *nsp, unsigned int lanes);
* @primary: version of primary bootloader
* @secondary: version id of secondary bootloader
* @nsp: version id of NSP
+ * @sensor_mask: mask of sensors present on the NIC
*/
struct nfp_nsp_identify {
char version[40];
@@ -167,8 +173,19 @@ struct nfp_nsp_identify {
u16 primary;
u16 secondary;
u16 nsp;
+ u64 sensor_mask;
};
struct nfp_nsp_identify *__nfp_nsp_identify(struct nfp_nsp *nsp);
+enum nfp_nsp_sensor_id {
+ NFP_SENSOR_CHIP_TEMPERATURE,
+ NFP_SENSOR_ASSEMBLY_POWER,
+ NFP_SENSOR_ASSEMBLY_12V_POWER,
+ NFP_SENSOR_ASSEMBLY_3V3_POWER,
+};
+
+int nfp_hwmon_read_sensor(struct nfp_cpp *cpp, enum nfp_nsp_sensor_id id,
+ long *val);
+
#endif
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_cmds.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_cmds.c
index e7a263de3731..5d362f87af08 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_cmds.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_cmds.c
@@ -46,7 +46,8 @@ struct nsp_identify {
__le16 primary;
__le16 secondary;
__le16 nsp;
- __le16 reserved;
+ u8 reserved[6];
+ __le64 sensor_mask;
};
struct nfp_nsp_identify *__nfp_nsp_identify(struct nfp_nsp *nsp)
@@ -82,8 +83,52 @@ struct nfp_nsp_identify *__nfp_nsp_identify(struct nfp_nsp *nsp)
nspi->primary = le16_to_cpu(ni->primary);
nspi->secondary = le16_to_cpu(ni->secondary);
nspi->nsp = le16_to_cpu(ni->nsp);
+ nspi->sensor_mask = le64_to_cpu(ni->sensor_mask);
exit_free:
kfree(ni);
return nspi;
}
+
+struct nfp_sensors {
+ __le32 chip_temp;
+ __le32 assembly_power;
+ __le32 assembly_12v_power;
+ __le32 assembly_3v3_power;
+};
+
+int nfp_hwmon_read_sensor(struct nfp_cpp *cpp, enum nfp_nsp_sensor_id id,
+ long *val)
+{
+ struct nfp_sensors s;
+ struct nfp_nsp *nsp;
+ int ret;
+
+ nsp = nfp_nsp_open(cpp);
+ if (IS_ERR(nsp))
+ return PTR_ERR(nsp);
+
+ ret = nfp_nsp_read_sensors(nsp, BIT(id), &s, sizeof(s));
+ nfp_nsp_close(nsp);
+
+ if (ret < 0)
+ return ret;
+
+ switch (id) {
+ case NFP_SENSOR_CHIP_TEMPERATURE:
+ *val = le32_to_cpu(s.chip_temp);
+ break;
+ case NFP_SENSOR_ASSEMBLY_POWER:
+ *val = le32_to_cpu(s.assembly_power);
+ break;
+ case NFP_SENSOR_ASSEMBLY_12V_POWER:
+ *val = le32_to_cpu(s.assembly_12v_power);
+ break;
+ case NFP_SENSOR_ASSEMBLY_3V3_POWER:
+ *val = le32_to_cpu(s.assembly_3v3_power);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
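
nfp_hwmon_read_sensor() opens an NSP session per read, fetches the whole sensor block, and picks out the requested field. Below is a hedged sketch of how a hwmon ->read() callback might sit on top of it; the callback name, drvdata wiring, and type-to-sensor mapping are assumptions, not part of this patch:

	/* Hedged sketch -- illustrative only */
	static int nfp_hwmon_read(struct device *dev,
				  enum hwmon_sensor_types type,
				  u32 attr, int channel, long *val)
	{
		struct nfp_pf *pf = dev_get_drvdata(dev);

		switch (type) {
		case hwmon_temp:
			return nfp_hwmon_read_sensor(pf->cpp,
					NFP_SENSOR_CHIP_TEMPERATURE, val);
		case hwmon_power:
			return nfp_hwmon_read_sensor(pf->cpp,
					NFP_SENSOR_ASSEMBLY_POWER, val);
		default:
			return -EOPNOTSUPP;
		}
	}
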
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
index 639438d8313a..c2bc36e8649f 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
@@ -186,17 +186,21 @@ nfp_eth_port_translate(struct nfp_nsp *nsp, const union eth_table_entry *src,
}
static void
-nfp_eth_mark_split_ports(struct nfp_cpp *cpp, struct nfp_eth_table *table)
+nfp_eth_calc_port_geometry(struct nfp_cpp *cpp, struct nfp_eth_table *table)
{
unsigned int i, j;
- for (i = 0; i < table->count; i++)
+ for (i = 0; i < table->count; i++) {
+ table->max_index = max(table->max_index, table->ports[i].index);
+
for (j = 0; j < table->count; j++) {
- if (i == j)
- continue;
if (table->ports[i].label_port !=
table->ports[j].label_port)
continue;
+ table->ports[i].port_lanes += table->ports[j].lanes;
+
+ if (i == j)
+ continue;
if (table->ports[i].label_subport ==
table->ports[j].label_subport)
nfp_warn(cpp,
@@ -205,8 +209,8 @@ nfp_eth_mark_split_ports(struct nfp_cpp *cpp, struct nfp_eth_table *table)
table->ports[i].label_subport);
table->ports[i].is_split = true;
- break;
}
+ }
}
static void
@@ -289,7 +293,7 @@ __nfp_eth_read_ports(struct nfp_cpp *cpp, struct nfp_nsp *nsp)
nfp_eth_port_translate(nsp, &entries[i], i,
&table->ports[j++]);
- nfp_eth_mark_split_ports(cpp, table);
+ nfp_eth_calc_port_geometry(cpp, table);
for (i = 0; i < table->count; i++)
nfp_eth_calc_port_type(cpp, &table->ports[i]);
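
The renamed nfp_eth_calc_port_geometry() does two things in one O(n^2) pass: it tracks the largest @index seen, and it sums @lanes across all entries sharing a @label_port, so every subport of a split port ends up with the full lane count in @port_lanes. A standalone sketch of the same pass on a 4x10G split; the reduced struct is an illustrative assumption:

	#include <stdio.h>

	struct port { unsigned label_port, lanes, port_lanes, is_split; };

	int main(void)
	{
		/* four subports of one physical port, one lane each */
		struct port p[4] = {
			{ 0, 1, 0, 0 }, { 0, 1, 0, 0 },
			{ 0, 1, 0, 0 }, { 0, 1, 0, 0 },
		};
		unsigned i, j, n = 4;

		for (i = 0; i < n; i++)
			for (j = 0; j < n; j++) {
				if (p[i].label_port != p[j].label_port)
					continue;
				p[i].port_lanes += p[j].lanes;
				if (i != j)
					p[i].is_split = 1;
			}
		for (i = 0; i < n; i++)	/* each: lanes=1 port_lanes=4 split=1 */
			printf("subport %u: lanes=%u port_lanes=%u split=%u\n",
			       i, p[i].lanes, p[i].port_lanes, p[i].is_split);
		return 0;
	}
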
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c
index 2d15a7c9d0de..072612263dab 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c
@@ -181,7 +181,8 @@ err_unlock_dev:
struct nfp_resource *
nfp_resource_acquire(struct nfp_cpp *cpp, const char *name)
{
- unsigned long warn_at = jiffies + 15 * HZ;
+ unsigned long warn_at = jiffies + NFP_MUTEX_WAIT_FIRST_WARN * HZ;
+ unsigned long err_at = jiffies + NFP_MUTEX_WAIT_ERROR * HZ;
struct nfp_cpp_mutex *dev_mutex;
struct nfp_resource *res;
int err;
@@ -214,10 +215,15 @@ nfp_resource_acquire(struct nfp_cpp *cpp, const char *name)
}
if (time_is_before_eq_jiffies(warn_at)) {
- warn_at = jiffies + 60 * HZ;
+ warn_at = jiffies + NFP_MUTEX_WAIT_NEXT_WARN * HZ;
nfp_warn(cpp, "Warning: waiting for NFP resource %s\n",
name);
}
+ if (time_is_before_eq_jiffies(err_at)) {
+ nfp_err(cpp, "Error: resource %s timed out\n", name);
+ err = -EBUSY;
+ goto err_free;
+ }
}
nfp_cpp_mutex_free(dev_mutex);
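
The acquire loop now carries two jiffies deadlines: a re-arming warning and a hard error cutoff that turns an unbounded wait into -EBUSY, with time_is_before_eq_jiffies() keeping both checks safe across jiffies wraparound. A hedged sketch of the pattern; try_acquire() and the timeout values are placeholders, the patch takes its values from NFP_MUTEX_WAIT_* constants defined elsewhere in the series:

	/* Hedged sketch -- illustrative only */
	unsigned long warn_at = jiffies + 15 * HZ;	/* first warning */
	unsigned long err_at  = jiffies + 60 * HZ;	/* hard cutoff */

	while (!try_acquire()) {
		if (time_is_before_eq_jiffies(warn_at)) {
			warn_at = jiffies + 30 * HZ;	/* re-arm warning */
			pr_warn("still waiting for resource\n");
		}
		if (time_is_before_eq_jiffies(err_at))
			return -EBUSY;
		msleep(10);
	}
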
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c
index 0e3870ecfb8c..ecda474ac7c3 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c
@@ -65,7 +65,8 @@ struct nfp_rtsym_entry {
__le32 size_lo;
};
-struct nfp_rtsym_cache {
+struct nfp_rtsym_table {
+ struct nfp_cpp *cpp;
int num;
char *strtab;
struct nfp_rtsym symtab[];
@@ -78,7 +79,7 @@ static int nfp_meid(u8 island_id, u8 menum)
}
static void
-nfp_rtsym_sw_entry_init(struct nfp_rtsym_cache *cache, u32 strtab_size,
+nfp_rtsym_sw_entry_init(struct nfp_rtsym_table *cache, u32 strtab_size,
struct nfp_rtsym *sw, struct nfp_rtsym_entry *fw)
{
sw->type = fw->type;
@@ -106,26 +107,36 @@ nfp_rtsym_sw_entry_init(struct nfp_rtsym_cache *cache, u32 strtab_size,
sw->domain = -1;
}
-static int nfp_rtsymtab_probe(struct nfp_cpp *cpp)
+struct nfp_rtsym_table *nfp_rtsym_table_read(struct nfp_cpp *cpp)
+{
+ struct nfp_rtsym_table *rtbl;
+ const struct nfp_mip *mip;
+
+ mip = nfp_mip_open(cpp);
+ rtbl = __nfp_rtsym_table_read(cpp, mip);
+ nfp_mip_close(mip);
+
+ return rtbl;
+}
+
+struct nfp_rtsym_table *
+__nfp_rtsym_table_read(struct nfp_cpp *cpp, const struct nfp_mip *mip)
{
const u32 dram = NFP_CPP_ID(NFP_CPP_TARGET_MU, NFP_CPP_ACTION_RW, 0) |
NFP_ISL_EMEM0;
u32 strtab_addr, symtab_addr, strtab_size, symtab_size;
struct nfp_rtsym_entry *rtsymtab;
- struct nfp_rtsym_cache *cache;
- const struct nfp_mip *mip;
+ struct nfp_rtsym_table *cache;
int err, n, size;
- mip = nfp_mip_open(cpp);
if (!mip)
- return -EIO;
+ return NULL;
nfp_mip_strtab(mip, &strtab_addr, &strtab_size);
nfp_mip_symtab(mip, &symtab_addr, &symtab_size);
- nfp_mip_close(mip);
if (!symtab_size || !strtab_size || symtab_size % sizeof(*rtsymtab))
- return -ENXIO;
+ return NULL;
/* Align to 64 bits */
symtab_size = round_up(symtab_size, 8);
@@ -133,27 +144,26 @@ static int nfp_rtsymtab_probe(struct nfp_cpp *cpp)
rtsymtab = kmalloc(symtab_size, GFP_KERNEL);
if (!rtsymtab)
- return -ENOMEM;
+ return NULL;
size = sizeof(*cache);
size += symtab_size / sizeof(*rtsymtab) * sizeof(struct nfp_rtsym);
size += strtab_size + 1;
cache = kmalloc(size, GFP_KERNEL);
- if (!cache) {
- err = -ENOMEM;
- goto err_free_rtsym_raw;
- }
+ if (!cache)
+ goto exit_free_rtsym_raw;
+ cache->cpp = cpp;
cache->num = symtab_size / sizeof(*rtsymtab);
cache->strtab = (void *)&cache->symtab[cache->num];
err = nfp_cpp_read(cpp, dram, symtab_addr, rtsymtab, symtab_size);
if (err != symtab_size)
- goto err_free_cache;
+ goto exit_free_cache;
err = nfp_cpp_read(cpp, dram, strtab_addr, cache->strtab, strtab_size);
if (err != strtab_size)
- goto err_free_cache;
+ goto exit_free_cache;
cache->strtab[strtab_size] = '\0';
for (n = 0; n < cache->num; n++)
@@ -161,97 +171,71 @@ static int nfp_rtsymtab_probe(struct nfp_cpp *cpp)
&cache->symtab[n], &rtsymtab[n]);
kfree(rtsymtab);
- nfp_rtsym_cache_set(cpp, cache);
- return 0;
-err_free_cache:
+ return cache;
+
+exit_free_cache:
kfree(cache);
-err_free_rtsym_raw:
+exit_free_rtsym_raw:
kfree(rtsymtab);
- return err;
-}
-
-static struct nfp_rtsym_cache *nfp_rtsym(struct nfp_cpp *cpp)
-{
- struct nfp_rtsym_cache *cache;
- int err;
-
- cache = nfp_rtsym_cache(cpp);
- if (cache)
- return cache;
-
- err = nfp_rtsymtab_probe(cpp);
- if (err < 0)
- return ERR_PTR(err);
-
- return nfp_rtsym_cache(cpp);
+ return NULL;
}
/**
* nfp_rtsym_count() - Get the number of RTSYM descriptors
- * @cpp: NFP CPP handle
+ * @rtbl: NFP RTsym table
*
- * Return: Number of RTSYM descriptors, or -ERRNO
+ * Return: Number of RTSYM descriptors
*/
-int nfp_rtsym_count(struct nfp_cpp *cpp)
+int nfp_rtsym_count(struct nfp_rtsym_table *rtbl)
{
- struct nfp_rtsym_cache *cache;
-
- cache = nfp_rtsym(cpp);
- if (IS_ERR(cache))
- return PTR_ERR(cache);
-
- return cache->num;
+ if (!rtbl)
+ return -EINVAL;
+ return rtbl->num;
}
/**
* nfp_rtsym_get() - Get the Nth RTSYM descriptor
- * @cpp: NFP CPP handle
+ * @rtbl: NFP RTsym table
* @idx: Index (0-based) of the RTSYM descriptor
*
* Return: const pointer to a struct nfp_rtsym descriptor, or NULL
*/
-const struct nfp_rtsym *nfp_rtsym_get(struct nfp_cpp *cpp, int idx)
+const struct nfp_rtsym *nfp_rtsym_get(struct nfp_rtsym_table *rtbl, int idx)
{
- struct nfp_rtsym_cache *cache;
-
- cache = nfp_rtsym(cpp);
- if (IS_ERR(cache))
+ if (!rtbl)
return NULL;
-
- if (idx >= cache->num)
+ if (idx >= rtbl->num)
return NULL;
- return &cache->symtab[idx];
+ return &rtbl->symtab[idx];
}
/**
* nfp_rtsym_lookup() - Return the RTSYM descriptor for a symbol name
- * @cpp: NFP CPP handle
+ * @rtbl: NFP RTsym table
* @name: Symbol name
*
* Return: const pointer to a struct nfp_rtsym descriptor, or NULL
*/
-const struct nfp_rtsym *nfp_rtsym_lookup(struct nfp_cpp *cpp, const char *name)
+const struct nfp_rtsym *
+nfp_rtsym_lookup(struct nfp_rtsym_table *rtbl, const char *name)
{
- struct nfp_rtsym_cache *cache;
int n;
- cache = nfp_rtsym(cpp);
- if (IS_ERR(cache))
+ if (!rtbl)
return NULL;
- for (n = 0; n < cache->num; n++) {
- if (strcmp(name, cache->symtab[n].name) == 0)
- return &cache->symtab[n];
- }
+ for (n = 0; n < rtbl->num; n++)
+ if (strcmp(name, rtbl->symtab[n].name) == 0)
+ return &rtbl->symtab[n];
return NULL;
}
/**
* nfp_rtsym_read_le() - Read a simple unsigned scalar value from symbol
- * @cpp: NFP CPP handle
+ * @rtbl: NFP RTsym table
* @name: Symbol name
 * @error: Pointer to error code (optional)
*
@@ -261,14 +245,15 @@ const struct nfp_rtsym *nfp_rtsym_lookup(struct nfp_cpp *cpp, const char *name)
*
* Return: value read, on error sets the error and returns ~0ULL.
*/
-u64 nfp_rtsym_read_le(struct nfp_cpp *cpp, const char *name, int *error)
+u64 nfp_rtsym_read_le(struct nfp_rtsym_table *rtbl, const char *name,
+ int *error)
{
const struct nfp_rtsym *sym;
u32 val32, id;
u64 val;
int err;
- sym = nfp_rtsym_lookup(cpp, name);
+ sym = nfp_rtsym_lookup(rtbl, name);
if (!sym) {
err = -ENOENT;
goto exit;
@@ -278,14 +263,14 @@ u64 nfp_rtsym_read_le(struct nfp_cpp *cpp, const char *name, int *error)
switch (sym->size) {
case 4:
- err = nfp_cpp_readl(cpp, id, sym->addr, &val32);
+ err = nfp_cpp_readl(rtbl->cpp, id, sym->addr, &val32);
val = val32;
break;
case 8:
- err = nfp_cpp_readq(cpp, id, sym->addr, &val);
+ err = nfp_cpp_readq(rtbl->cpp, id, sym->addr, &val);
break;
default:
- nfp_err(cpp,
+ nfp_err(rtbl->cpp,
"rtsym '%s' unsupported or non-scalar size: %lld\n",
name, sym->size);
err = -EINVAL;
@@ -304,3 +289,30 @@ exit:
return ~0ULL;
return val;
}
+
+u8 __iomem *
+nfp_rtsym_map(struct nfp_rtsym_table *rtbl, const char *name, const char *id,
+ unsigned int min_size, struct nfp_cpp_area **area)
+{
+ const struct nfp_rtsym *sym;
+ u8 __iomem *mem;
+
+ sym = nfp_rtsym_lookup(rtbl, name);
+ if (!sym)
+ return (u8 __iomem *)ERR_PTR(-ENOENT);
+
+ if (sym->size < min_size) {
+ nfp_err(rtbl->cpp, "Symbol %s too small\n", name);
+ return (u8 __iomem *)ERR_PTR(-EINVAL);
+ }
+
+ mem = nfp_cpp_map_area(rtbl->cpp, id, sym->domain, sym->target,
+ sym->addr, sym->size, area);
+ if (IS_ERR(mem)) {
+ nfp_err(rtbl->cpp, "Failed to map symbol %s: %ld\n",
+ name, PTR_ERR(mem));
+ return mem;
+ }
+
+ return mem;
+}
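
The conversion from an implicit per-cpp cache to an explicit struct nfp_rtsym_table makes the lifetime obvious: read once, query many times, then kfree() the table (it is allocated as a single kmalloc() chunk). A hedged usage sketch; the symbol name is purely illustrative:

	/* Hedged sketch -- illustrative only */
	struct nfp_rtsym_table *rtbl;
	int err;
	u64 val;

	rtbl = nfp_rtsym_table_read(cpp);
	if (!rtbl)
		return -EIO;

	val = nfp_rtsym_read_le(rtbl, "_some_fw_counter", &err);
	if (!err)
		pr_info("counter: %llu\n", val);

	kfree(rtbl);
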
diff --git a/drivers/net/ethernet/netronome/nfp/nic/main.c b/drivers/net/ethernet/netronome/nfp/nic/main.c
new file mode 100644
index 000000000000..520684242b7d
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nic/main.c
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "../nfpcore/nfp_cpp.h"
+#include "../nfpcore/nfp_nsp.h"
+#include "../nfp_app.h"
+#include "../nfp_main.h"
+
+static int nfp_nic_init(struct nfp_app *app)
+{
+ struct nfp_pf *pf = app->pf;
+
+ if (pf->eth_tbl && pf->max_data_vnics != pf->eth_tbl->count) {
+ nfp_err(pf->cpp, "ETH entries don't match vNICs (%d vs %d)\n",
+ pf->max_data_vnics, pf->eth_tbl->count);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+const struct nfp_app_type app_nic = {
+ .id = NFP_APP_CORE_NIC,
+ .name = "nic",
+
+ .init = nfp_nic_init,
+ .vnic_init = nfp_app_nic_vnic_init,
+};
diff --git a/drivers/net/ethernet/nuvoton/w90p910_ether.c b/drivers/net/ethernet/nuvoton/w90p910_ether.c
index 159564d8dcdb..89ab786da25f 100644
--- a/drivers/net/ethernet/nuvoton/w90p910_ether.c
+++ b/drivers/net/ethernet/nuvoton/w90p910_ether.c
@@ -868,7 +868,10 @@ static int w90p910_get_link_ksettings(struct net_device *dev,
struct ethtool_link_ksettings *cmd)
{
struct w90p910_ether *ether = netdev_priv(dev);
- return mii_ethtool_get_link_ksettings(&ether->mii, cmd);
+
+ mii_ethtool_get_link_ksettings(&ether->mii, cmd);
+
+ return 0;
}
static int w90p910_set_link_ksettings(struct net_device *dev,
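
This hunk and the pch_gbe one below follow the same tree-wide conversion: mii_ethtool_get_link_ksettings() was changed to return void earlier in this series because it cannot fail, so callers now report success unconditionally. The common pattern, with hypothetical foo_* names:

	/* Hedged sketch of the converged pattern -- names are placeholders */
	static int foo_get_link_ksettings(struct net_device *dev,
					  struct ethtool_link_ksettings *cmd)
	{
		struct foo_priv *priv = netdev_priv(dev);

		mii_ethtool_get_link_ksettings(&priv->mii, cmd);
		return 0;
	}
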
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index 9c7ffd649e9a..08381ef8bdb4 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -919,7 +919,6 @@ static int __lpc_handle_recv(struct net_device *ndev, int budget)
struct sk_buff *skb;
u32 rxconsidx, len, ethst;
struct rx_status_t *prxstat;
- u8 *prdbuf;
int rx_done = 0;
/* Get the current RX buffer indexes */
@@ -959,11 +958,10 @@ static int __lpc_handle_recv(struct net_device *ndev, int budget)
if (!skb) {
ndev->stats.rx_dropped++;
} else {
- prdbuf = skb_put(skb, len);
-
/* Copy packet from buffer */
- memcpy(prdbuf, pldat->rx_buff_v +
- rxconsidx * ENET_MAXF_SIZE, len);
+ skb_put_data(skb,
+ pldat->rx_buff_v + rxconsidx * ENET_MAXF_SIZE,
+ len);
/* Pass to upper layer */
skb->protocol = eth_type_trans(skb, ndev);
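
skb_put_data() is a helper introduced in this same kernel cycle that folds the skb_put()-then-memcpy() pair into a single call; the two forms below are behaviorally equivalent:

	/* before */
	u8 *p = skb_put(skb, len);
	memcpy(p, src, len);

	/* after */
	skb_put_data(skb, src, len);
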
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
index 21093276d2b7..731ce1e419e4 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
@@ -85,9 +85,8 @@ static int pch_gbe_get_link_ksettings(struct net_device *netdev,
{
struct pch_gbe_adapter *adapter = netdev_priv(netdev);
u32 supported, advertising;
- int ret;
- ret = mii_ethtool_get_link_ksettings(&adapter->mii, ecmd);
+ mii_ethtool_get_link_ksettings(&adapter->mii, ecmd);
ethtool_convert_link_mode_to_legacy_u32(&supported,
ecmd->link_modes.supported);
@@ -104,7 +103,8 @@ static int pch_gbe_get_link_ksettings(struct net_device *netdev,
if (!netif_carrier_ok(adapter->netdev))
ecmd->base.speed = SPEED_UNKNOWN;
- return ret;
+
+ return 0;
}
/**
diff --git a/drivers/net/ethernet/packetengines/hamachi.c b/drivers/net/ethernet/packetengines/hamachi.c
index 8b026dbf0d8d..482b85e4d665 100644
--- a/drivers/net/ethernet/packetengines/hamachi.c
+++ b/drivers/net/ethernet/packetengines/hamachi.c
@@ -1495,8 +1495,8 @@ static int hamachi_rx(struct net_device *dev)
hmp->rx_skbuff[entry]->data, pkt_len);
skb_put(skb, pkt_len);
#else
- memcpy(skb_put(skb, pkt_len), hmp->rx_ring_dma
- + entry*sizeof(*desc), pkt_len);
+ skb_put_data(skb, hmp->rx_ring_dma
+ + entry*sizeof(*desc), pkt_len);
#endif
pci_dma_sync_single_for_device(hmp->pci_dev,
leXX_to_cpu(hmp->rx_ring[entry].addr),
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
index e30676515529..6cec2a6a3dcc 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
@@ -174,7 +174,6 @@ netxen_setup_minidump(struct netxen_adapter *adapter)
{
int err = 0, i;
u32 *template, *tmp_buf;
- struct netxen_minidump_template_hdr *hdr;
err = netxen_get_minidump_template_size(adapter);
if (err) {
adapter->mdump.fw_supports_md = 0;
@@ -218,8 +217,6 @@ netxen_setup_minidump(struct netxen_adapter *adapter)
template = (u32 *) adapter->mdump.md_template;
for (i = 0; i < adapter->mdump.md_template_size/sizeof(u32); i++)
*template++ = __le32_to_cpu(*tmp_buf++);
- hdr = (struct netxen_minidump_template_hdr *)
- adapter->mdump.md_template;
adapter->mdump.md_capture_buff = NULL;
adapter->mdump.fw_supports_md = 1;
adapter->mdump.md_enabled = 0;
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
index a996801d442d..66ff15d08bad 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
@@ -21,6 +21,7 @@
*
*/
+#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/slab.h>
#include "netxen_nic.h"
#include "netxen_nic_hw.h"
@@ -44,20 +45,6 @@ static void netxen_nic_io_write_128M(struct netxen_adapter *adapter,
void __iomem *addr, u32 data);
static u32 netxen_nic_io_read_128M(struct netxen_adapter *adapter,
void __iomem *addr);
-#ifndef readq
-static inline u64 readq(void __iomem *addr)
-{
- return readl(addr) | (((u64) readl(addr + 4)) << 32LL);
-}
-#endif
-
-#ifndef writeq
-static inline void writeq(u64 val, void __iomem *addr)
-{
- writel(((u32) (val)), (addr));
- writel(((u32) (val >> 32)), (addr + 4));
-}
-#endif
#define PCI_OFFSET_FIRST_RANGE(adapter, off) \
((adapter)->ahw.pci_base0 + (off))
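
Dropping the driver-local readq()/writeq() fallbacks in favor of <linux/io-64-nonatomic-lo-hi.h> preserves the semantics: on targets without native 64-bit MMIO the access is split into two 32-bit halves, low word first, and is therefore not atomic. A standalone sketch of the read side:

	#include <stdint.h>
	#include <stdio.h>

	static uint64_t readq_lo_hi(const volatile uint32_t *addr)
	{
		uint32_t lo = addr[0];	/* low dword first */
		uint32_t hi = addr[1];	/* then high dword */

		return ((uint64_t)hi << 32) | lo;
	}

	int main(void)
	{
		uint32_t reg[2] = { 0xdeadbeefu, 0x00000001u };

		printf("0x%llx\n", (unsigned long long)readq_lo_hi(reg));
		return 0;	/* prints 0x1deadbeef */
	}
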
diff --git a/drivers/net/ethernet/qlogic/qed/Makefile b/drivers/net/ethernet/qlogic/qed/Makefile
index 974929dcc74e..82dd47068e18 100644
--- a/drivers/net/ethernet/qlogic/qed/Makefile
+++ b/drivers/net/ethernet/qlogic/qed/Makefile
@@ -5,6 +5,6 @@ qed-y := qed_cxt.o qed_dev.o qed_hw.o qed_init_fw_funcs.o qed_init_ops.o \
qed_selftest.o qed_dcbx.o qed_debug.o qed_ptp.o
qed-$(CONFIG_QED_SRIOV) += qed_sriov.o qed_vf.o
qed-$(CONFIG_QED_LL2) += qed_ll2.o
-qed-$(CONFIG_QED_RDMA) += qed_roce.o
+qed-$(CONFIG_QED_RDMA) += qed_roce.o qed_rdma.o qed_iwarp.o
qed-$(CONFIG_QED_ISCSI) += qed_iscsi.o qed_ooo.o
qed-$(CONFIG_QED_FCOE) += qed_fcoe.o
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index 2ab1aab7c3fe..91003bc6f00b 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -54,7 +54,7 @@ extern const struct qed_common_ops qed_common_ops_pass;
#define QED_MAJOR_VERSION 8
#define QED_MINOR_VERSION 10
-#define QED_REVISION_VERSION 10
+#define QED_REVISION_VERSION 11
#define QED_ENGINEERING_VERSION 21
#define QED_VERSION \
@@ -92,7 +92,7 @@ enum qed_mcp_protocol_type;
#define QED_MFW_SET_FIELD(name, field, value) \
do { \
- (name) &= ~((field ## _MASK) << (field ## _SHIFT)); \
+ (name) &= ~(field ## _MASK); \
(name) |= (((value) << (field ## _SHIFT)) & (field ## _MASK));\
} while (0)
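
The QED_MFW_SET_FIELD fix matters because the qed field convention defines _MASK already shifted into position; the old clear step shifted it a second time and wiped the wrong bits. A standalone check of the corrected sequence; the mask and shift values are made up for illustration:

	#include <assert.h>
	#include <stdint.h>

	#define F_MASK  0x00000ff0u	/* already shifted, bits 4..11 */
	#define F_SHIFT 4

	int main(void)
	{
		uint32_t reg = 0xffffffffu;

		reg &= ~F_MASK;				/* fixed clear */
		reg |= (0xabu << F_SHIFT) & F_MASK;	/* insert value */
		assert(((reg & F_MASK) >> F_SHIFT) == 0xab);
		return 0;
	}
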
@@ -210,14 +210,16 @@ struct qed_tunn_update_params {
/* The PCI personality is not quite synonymous to protocol ID:
* 1. All personalities need CORE connections
- * 2. The Ethernet personality may support also the RoCE protocol
+ * 2. The Ethernet personality may also support the RoCE/iWARP protocols
*/
enum qed_pci_personality {
QED_PCI_ETH,
QED_PCI_FCOE,
QED_PCI_ISCSI,
QED_PCI_ETH_ROCE,
- QED_PCI_DEFAULT /* default in shmem */
+ QED_PCI_ETH_IWARP,
+ QED_PCI_ETH_RDMA,
+ QED_PCI_DEFAULT, /* default in shmem */
};
/* All VFs are symmetric, all counters are PF + all VFs */
@@ -277,6 +279,7 @@ enum qed_dev_cap {
QED_DEV_CAP_FCOE,
QED_DEV_CAP_ISCSI,
QED_DEV_CAP_ROCE,
+ QED_DEV_CAP_IWARP,
};
enum qed_wol_support {
@@ -286,7 +289,24 @@ enum qed_wol_support {
struct qed_hw_info {
/* PCI personality */
- enum qed_pci_personality personality;
+ enum qed_pci_personality personality;
+#define QED_IS_RDMA_PERSONALITY(dev) \
+ ((dev)->hw_info.personality == QED_PCI_ETH_ROCE || \
+ (dev)->hw_info.personality == QED_PCI_ETH_IWARP || \
+ (dev)->hw_info.personality == QED_PCI_ETH_RDMA)
+#define QED_IS_ROCE_PERSONALITY(dev) \
+ ((dev)->hw_info.personality == QED_PCI_ETH_ROCE || \
+ (dev)->hw_info.personality == QED_PCI_ETH_RDMA)
+#define QED_IS_IWARP_PERSONALITY(dev) \
+ ((dev)->hw_info.personality == QED_PCI_ETH_IWARP || \
+ (dev)->hw_info.personality == QED_PCI_ETH_RDMA)
+#define QED_IS_L2_PERSONALITY(dev) \
+ ((dev)->hw_info.personality == QED_PCI_ETH || \
+ QED_IS_RDMA_PERSONALITY(dev))
+#define QED_IS_FCOE_PERSONALITY(dev) \
+ ((dev)->hw_info.personality == QED_PCI_FCOE)
+#define QED_IS_ISCSI_PERSONALITY(dev) \
+ ((dev)->hw_info.personality == QED_PCI_ISCSI)
/* Resource Allocation scheme results */
u32 resc_start[QED_MAX_RESC];
@@ -412,6 +432,11 @@ struct qed_fw_data {
u32 init_ops_size;
};
+enum BAR_ID {
+ BAR_ID_0, /* Used for GRC */
+ BAR_ID_1 /* Used for doorbells */
+};
+
#define DRV_MODULE_VERSION \
__stringify(QED_MAJOR_VERSION) "." \
__stringify(QED_MINOR_VERSION) "." \
@@ -495,10 +520,6 @@ struct qed_hwfn {
bool b_rdma_enabled_in_prs;
u32 rdma_prs_search_reg;
- /* Array of sb_info of all status blocks */
- struct qed_sb_info *sbs_info[MAX_SB_PER_PF_MIMD];
- u16 num_sbs;
-
struct qed_cxt_mngr *p_cxt_mngr;
/* Flag indicating whether interrupts are enabled or not*/
@@ -537,6 +558,9 @@ struct qed_hwfn {
u8 dcbx_no_edpm;
u8 db_bar_no_edpm;
+ /* L2-related */
+ struct qed_l2_info *p_l2_info;
+
struct qed_ptt *p_arfs_ptt;
struct qed_simd_fp_handler simd_proto_handler[64];
@@ -548,7 +572,6 @@ struct qed_hwfn {
#endif
struct z_stream_s *stream;
- struct qed_roce_ll2_info *ll2;
};
struct pci_params {
@@ -598,16 +621,11 @@ struct qed_dev {
enum qed_dev_type type;
/* Translate type/revision combo into the proper conditions */
#define QED_IS_BB(dev) ((dev)->type == QED_DEV_TYPE_BB)
-#define QED_IS_BB_A0(dev) (QED_IS_BB(dev) && \
- CHIP_REV_IS_A0(dev))
#define QED_IS_BB_B0(dev) (QED_IS_BB(dev) && \
CHIP_REV_IS_B0(dev))
#define QED_IS_AH(dev) ((dev)->type == QED_DEV_TYPE_AH)
#define QED_IS_K2(dev) QED_IS_AH(dev)
-#define QED_GET_TYPE(dev) (QED_IS_BB_A0(dev) ? CHIP_BB_A0 : \
- QED_IS_BB_B0(dev) ? CHIP_BB_B0 : CHIP_K2)
-
u16 vendor_id;
u16 device_id;
#define QED_DEV_ID_MASK 0xff00
@@ -621,7 +639,6 @@ struct qed_dev {
u16 chip_rev;
#define CHIP_REV_MASK 0xf
#define CHIP_REV_SHIFT 12
-#define CHIP_REV_IS_A0(_cdev) (!(_cdev)->chip_rev)
#define CHIP_REV_IS_B0(_cdev) ((_cdev)->chip_rev == 1)
u16 chip_metal;
@@ -633,7 +650,7 @@ struct qed_dev {
#define CHIP_BOND_ID_SHIFT 0
u8 num_engines;
- u8 num_ports_in_engines;
+ u8 num_ports_in_engine;
u8 num_funcs_in_port;
u8 path_id;
@@ -644,7 +661,6 @@ struct qed_dev {
int pcie_width;
int pcie_speed;
- u8 ver_str[VER_SIZE];
/* Add MF related configuration */
u8 mcp_rev;
@@ -763,7 +779,7 @@ static inline u8 qed_concrete_to_sw_fid(struct qed_dev *cdev,
}
#define PURE_LB_TC 8
-#define OOO_LB_TC 9
+#define PKT_LB_TC 9
int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate);
void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev,
@@ -773,6 +789,8 @@ void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev,
void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
int qed_device_num_engines(struct qed_dev *cdev);
int qed_device_get_port_id(struct qed_dev *cdev);
+void qed_set_fw_mac_addr(__le16 *fw_msb,
+ __le16 *fw_mid, __le16 *fw_lsb, u8 *mac);
#define QED_LEADING_HWFN(dev) (&dev->hwfns[0])
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index 694845793af2..af106be8cc08 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -135,7 +135,6 @@ struct qed_tid_seg {
struct qed_conn_type_cfg {
u32 cid_count;
- u32 cid_start;
u32 cids_per_vf;
struct qed_tid_seg tid_seg[TASK_SEGMENTS];
};
@@ -222,6 +221,9 @@ struct qed_cxt_mngr {
/* Acquired CIDs */
struct qed_cid_acquired_map acquired[MAX_CONN_TYPES];
+ struct qed_cid_acquired_map
+ acquired_vf[MAX_CONN_TYPES][MAX_NUM_VFS];
+
/* ILT shadow table */
struct qed_dma_mem *ilt_shadow;
u32 pf_start_line;
@@ -244,14 +246,16 @@ struct qed_cxt_mngr {
static bool src_proto(enum protocol_type type)
{
return type == PROTOCOLID_ISCSI ||
- type == PROTOCOLID_FCOE;
+ type == PROTOCOLID_FCOE ||
+ type == PROTOCOLID_IWARP;
}
static bool tm_cid_proto(enum protocol_type type)
{
return type == PROTOCOLID_ISCSI ||
type == PROTOCOLID_FCOE ||
- type == PROTOCOLID_ROCE;
+ type == PROTOCOLID_ROCE ||
+ type == PROTOCOLID_IWARP;
}
static bool tm_tid_proto(enum protocol_type type)
@@ -851,7 +855,7 @@ u32 qed_cxt_cfg_ilt_compute_excess(struct qed_hwfn *p_hwfn, u32 used_lines)
if (!excess_lines)
return 0;
- if (p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE)
+ if (!QED_IS_RDMA_PERSONALITY(p_hwfn))
return 0;
p_mngr = p_hwfn->p_cxt_mngr;
@@ -1031,7 +1035,7 @@ static int qed_ilt_blk_alloc(struct qed_hwfn *p_hwfn,
u32 lines, line, sz_left, lines_to_skip = 0;
/* Special handling for RoCE that supports dynamic allocation */
- if ((p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) &&
+ if (QED_IS_RDMA_PERSONALITY(p_hwfn) &&
((ilt_client == ILT_CLI_CDUT) || ilt_client == ILT_CLI_TSDM))
return 0;
@@ -1121,45 +1125,76 @@ ilt_shadow_fail:
static void qed_cid_map_free(struct qed_hwfn *p_hwfn)
{
struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
- u32 type;
+ u32 type, vf;
for (type = 0; type < MAX_CONN_TYPES; type++) {
kfree(p_mngr->acquired[type].cid_map);
p_mngr->acquired[type].max_count = 0;
p_mngr->acquired[type].start_cid = 0;
+
+ for (vf = 0; vf < MAX_NUM_VFS; vf++) {
+ kfree(p_mngr->acquired_vf[type][vf].cid_map);
+ p_mngr->acquired_vf[type][vf].max_count = 0;
+ p_mngr->acquired_vf[type][vf].start_cid = 0;
+ }
}
}
+static int
+qed_cid_map_alloc_single(struct qed_hwfn *p_hwfn,
+ u32 type,
+ u32 cid_start,
+ u32 cid_count, struct qed_cid_acquired_map *p_map)
+{
+ u32 size;
+
+ if (!cid_count)
+ return 0;
+
+ size = DIV_ROUND_UP(cid_count,
+ sizeof(unsigned long) * BITS_PER_BYTE) *
+ sizeof(unsigned long);
+ p_map->cid_map = kzalloc(size, GFP_KERNEL);
+ if (!p_map->cid_map)
+ return -ENOMEM;
+
+ p_map->max_count = cid_count;
+ p_map->start_cid = cid_start;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_CXT,
+ "Type %08x start: %08x count %08x\n",
+ type, p_map->start_cid, p_map->max_count);
+
+ return 0;
+}
+
static int qed_cid_map_alloc(struct qed_hwfn *p_hwfn)
{
struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
- u32 start_cid = 0;
- u32 type;
+ u32 start_cid = 0, vf_start_cid = 0;
+ u32 type, vf;
for (type = 0; type < MAX_CONN_TYPES; type++) {
- u32 cid_cnt = p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count;
- u32 size;
-
- if (cid_cnt == 0)
- continue;
+ struct qed_conn_type_cfg *p_cfg = &p_mngr->conn_cfg[type];
+ struct qed_cid_acquired_map *p_map;
- size = DIV_ROUND_UP(cid_cnt,
- sizeof(unsigned long) * BITS_PER_BYTE) *
- sizeof(unsigned long);
- p_mngr->acquired[type].cid_map = kzalloc(size, GFP_KERNEL);
- if (!p_mngr->acquired[type].cid_map)
+ /* Handle PF maps */
+ p_map = &p_mngr->acquired[type];
+ if (qed_cid_map_alloc_single(p_hwfn, type, start_cid,
+ p_cfg->cid_count, p_map))
goto cid_map_fail;
- p_mngr->acquired[type].max_count = cid_cnt;
- p_mngr->acquired[type].start_cid = start_cid;
-
- p_hwfn->p_cxt_mngr->conn_cfg[type].cid_start = start_cid;
+ /* Handle VF maps */
+ for (vf = 0; vf < MAX_NUM_VFS; vf++) {
+ p_map = &p_mngr->acquired_vf[type][vf];
+ if (qed_cid_map_alloc_single(p_hwfn, type,
+ vf_start_cid,
+ p_cfg->cids_per_vf, p_map))
+ goto cid_map_fail;
+ }
- DP_VERBOSE(p_hwfn, QED_MSG_CXT,
- "Type %08x start: %08x count %08x\n",
- type, p_mngr->acquired[type].start_cid,
- p_mngr->acquired[type].max_count);
- start_cid += cid_cnt;
+ start_cid += p_cfg->cid_count;
+ vf_start_cid += p_cfg->cids_per_vf;
}
return 0;
@@ -1265,19 +1300,36 @@ void qed_cxt_mngr_free(struct qed_hwfn *p_hwfn)
void qed_cxt_mngr_setup(struct qed_hwfn *p_hwfn)
{
struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ struct qed_cid_acquired_map *p_map;
+ struct qed_conn_type_cfg *p_cfg;
int type;
+ u32 len;
/* Reset acquired cids */
for (type = 0; type < MAX_CONN_TYPES; type++) {
- u32 cid_cnt = p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count;
+ u32 vf;
+
+ p_cfg = &p_mngr->conn_cfg[type];
+ if (p_cfg->cid_count) {
+ p_map = &p_mngr->acquired[type];
+ len = DIV_ROUND_UP(p_map->max_count,
+ sizeof(unsigned long) *
+ BITS_PER_BYTE) *
+ sizeof(unsigned long);
+ memset(p_map->cid_map, 0, len);
+ }
- if (cid_cnt == 0)
+ if (!p_cfg->cids_per_vf)
continue;
- memset(p_mngr->acquired[type].cid_map, 0,
- DIV_ROUND_UP(cid_cnt,
- sizeof(unsigned long) * BITS_PER_BYTE) *
- sizeof(unsigned long));
+ for (vf = 0; vf < MAX_NUM_VFS; vf++) {
+ p_map = &p_mngr->acquired_vf[type][vf];
+ len = DIV_ROUND_UP(p_map->max_count,
+ sizeof(unsigned long) *
+ BITS_PER_BYTE) *
+ sizeof(unsigned long);
+ memset(p_map->cid_map, 0, len);
+ }
}
}
@@ -1783,7 +1835,7 @@ static void qed_tm_init_pf(struct qed_hwfn *p_hwfn)
tm_offset += tm_iids.pf_tids[i];
}
- if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE)
+ if (QED_IS_RDMA_PERSONALITY(p_hwfn))
active_seg_mask = 0;
STORE_RT_REG(p_hwfn, TM_REG_PF_ENABLE_TASK_RT_OFFSET, active_seg_mask);
@@ -1841,91 +1893,145 @@ void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
qed_prs_init_pf(p_hwfn);
}
-int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
- enum protocol_type type, u32 *p_cid)
+int _qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
+ enum protocol_type type, u32 *p_cid, u8 vfid)
{
struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ struct qed_cid_acquired_map *p_map;
u32 rel_cid;
- if (type >= MAX_CONN_TYPES || !p_mngr->acquired[type].cid_map) {
+ if (type >= MAX_CONN_TYPES) {
DP_NOTICE(p_hwfn, "Invalid protocol type %d", type);
return -EINVAL;
}
- rel_cid = find_first_zero_bit(p_mngr->acquired[type].cid_map,
- p_mngr->acquired[type].max_count);
+ if (vfid >= MAX_NUM_VFS && vfid != QED_CXT_PF_CID) {
+ DP_NOTICE(p_hwfn, "VF [%02x] is out of range\n", vfid);
+ return -EINVAL;
+ }
- if (rel_cid >= p_mngr->acquired[type].max_count) {
+ /* Determine the right map to take this CID from */
+ if (vfid == QED_CXT_PF_CID)
+ p_map = &p_mngr->acquired[type];
+ else
+ p_map = &p_mngr->acquired_vf[type][vfid];
+
+ if (!p_map->cid_map) {
+ DP_NOTICE(p_hwfn, "Invalid protocol type %d", type);
+ return -EINVAL;
+ }
+
+ rel_cid = find_first_zero_bit(p_map->cid_map, p_map->max_count);
+
+ if (rel_cid >= p_map->max_count) {
DP_NOTICE(p_hwfn, "no CID available for protocol %d\n", type);
return -EINVAL;
}
- __set_bit(rel_cid, p_mngr->acquired[type].cid_map);
+ __set_bit(rel_cid, p_map->cid_map);
- *p_cid = rel_cid + p_mngr->acquired[type].start_cid;
+ *p_cid = rel_cid + p_map->start_cid;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_CXT,
+ "Acquired cid 0x%08x [rel. %08x] vfid %02x type %d\n",
+ *p_cid, rel_cid, vfid, type);
return 0;
}
+int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
+ enum protocol_type type, u32 *p_cid)
+{
+ return _qed_cxt_acquire_cid(p_hwfn, type, p_cid, QED_CXT_PF_CID);
+}
+
static bool qed_cxt_test_cid_acquired(struct qed_hwfn *p_hwfn,
- u32 cid, enum protocol_type *p_type)
+ u32 cid,
+ u8 vfid,
+ enum protocol_type *p_type,
+ struct qed_cid_acquired_map **pp_map)
{
struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
- struct qed_cid_acquired_map *p_map;
- enum protocol_type p;
u32 rel_cid;
/* Iterate over protocols and find matching cid range */
- for (p = 0; p < MAX_CONN_TYPES; p++) {
- p_map = &p_mngr->acquired[p];
+ for (*p_type = 0; *p_type < MAX_CONN_TYPES; (*p_type)++) {
+ if (vfid == QED_CXT_PF_CID)
+ *pp_map = &p_mngr->acquired[*p_type];
+ else
+ *pp_map = &p_mngr->acquired_vf[*p_type][vfid];
- if (!p_map->cid_map)
+ if (!((*pp_map)->cid_map))
continue;
- if (cid >= p_map->start_cid &&
- cid < p_map->start_cid + p_map->max_count)
+ if (cid >= (*pp_map)->start_cid &&
+ cid < (*pp_map)->start_cid + (*pp_map)->max_count)
break;
}
- *p_type = p;
- if (p == MAX_CONN_TYPES) {
- DP_NOTICE(p_hwfn, "Invalid CID %d", cid);
- return false;
+ if (*p_type == MAX_CONN_TYPES) {
+ DP_NOTICE(p_hwfn, "Invalid CID %d vfid %02x", cid, vfid);
+ goto fail;
}
- rel_cid = cid - p_map->start_cid;
- if (!test_bit(rel_cid, p_map->cid_map)) {
- DP_NOTICE(p_hwfn, "CID %d not acquired", cid);
- return false;
+ rel_cid = cid - (*pp_map)->start_cid;
+ if (!test_bit(rel_cid, (*pp_map)->cid_map)) {
+ DP_NOTICE(p_hwfn, "CID %d [vifd %02x] not acquired",
+ cid, vfid);
+ goto fail;
}
+
return true;
+fail:
+ *p_type = MAX_CONN_TYPES;
+ *pp_map = NULL;
+ return false;
}
-void qed_cxt_release_cid(struct qed_hwfn *p_hwfn, u32 cid)
+void _qed_cxt_release_cid(struct qed_hwfn *p_hwfn, u32 cid, u8 vfid)
{
- struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ struct qed_cid_acquired_map *p_map = NULL;
enum protocol_type type;
bool b_acquired;
u32 rel_cid;
+ if (vfid != QED_CXT_PF_CID && vfid >= MAX_NUM_VFS) {
+ DP_NOTICE(p_hwfn,
+ "Trying to return incorrect CID belonging to VF %02x\n",
+ vfid);
+ return;
+ }
+
/* Test acquired and find matching per-protocol map */
- b_acquired = qed_cxt_test_cid_acquired(p_hwfn, cid, &type);
+ b_acquired = qed_cxt_test_cid_acquired(p_hwfn, cid, vfid,
+ &type, &p_map);
if (!b_acquired)
return;
- rel_cid = cid - p_mngr->acquired[type].start_cid;
- __clear_bit(rel_cid, p_mngr->acquired[type].cid_map);
+ rel_cid = cid - p_map->start_cid;
+ clear_bit(rel_cid, p_map->cid_map);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_CXT,
+ "Released CID 0x%08x [rel. %08x] vfid %02x type %d\n",
+ cid, rel_cid, vfid, type);
+}
+
+void qed_cxt_release_cid(struct qed_hwfn *p_hwfn, u32 cid)
+{
+ _qed_cxt_release_cid(p_hwfn, cid, QED_CXT_PF_CID);
}
int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn, struct qed_cxt_info *p_info)
{
struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ struct qed_cid_acquired_map *p_map = NULL;
u32 conn_cxt_size, hw_p_size, cxts_per_p, line;
enum protocol_type type;
bool b_acquired;
/* Test acquired and find matching per-protocol map */
- b_acquired = qed_cxt_test_cid_acquired(p_hwfn, p_info->iid, &type);
+ b_acquired = qed_cxt_test_cid_acquired(p_hwfn, p_info->iid,
+ QED_CXT_PF_CID, &type, &p_map);
if (!b_acquired)
return -EINVAL;
@@ -1964,6 +2070,11 @@ static void qed_rdma_set_pf_params(struct qed_hwfn *p_hwfn,
num_srqs = min_t(u32, 32 * 1024, p_params->num_srqs);
switch (p_hwfn->hw_info.personality) {
+ case QED_PCI_ETH_IWARP:
+ /* Each QP requires one connection */
+ num_cons = min_t(u32, IWARP_MAX_QPS, p_params->num_qps);
+ proto = PROTOCOLID_IWARP;
+ break;
case QED_PCI_ETH_ROCE:
num_qps = min_t(u32, ROCE_MAX_QPS, p_params->num_qps);
num_cons = num_qps * 2; /* each QP requires two connections */
@@ -1999,6 +2110,8 @@ int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn, u32 rdma_tasks)
qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_CORE, core_cids, 0);
switch (p_hwfn->hw_info.personality) {
+ case QED_PCI_ETH_RDMA:
+ case QED_PCI_ETH_IWARP:
case QED_PCI_ETH_ROCE:
{
qed_rdma_set_pf_params(p_hwfn,
@@ -2012,8 +2125,12 @@ int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn, u32 rdma_tasks)
struct qed_eth_pf_params *p_params =
&p_hwfn->pf_params.eth_pf_params;
- qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_ETH,
- p_params->num_cons, 1);
+ if (!p_params->num_vf_cons)
+ p_params->num_vf_cons =
+ ETH_PF_PARAMS_VF_CONS_DEFAULT;
+ qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_ETH,
+ p_params->num_cons,
+ p_params->num_vf_cons);
p_hwfn->p_cxt_mngr->arfs_count = p_params->num_arfs_filters;
break;
}
@@ -2236,7 +2353,7 @@ qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn,
last_cid_allocated - 1);
if (!p_hwfn->b_rdma_enabled_in_prs) {
- /* Enable RoCE search */
+ /* Enable RDMA search */
qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 1);
p_hwfn->b_rdma_enabled_in_prs = true;
}
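
The CID scheme in qed_cxt.c is a per-type (and, with this patch, per-VF) bitmap of relative IDs plus a start offset: acquire is find_first_zero_bit() plus __set_bit(), and release clears the bit back. A standalone toy version of the same bookkeeping on a 32-entry map:

	#include <stdio.h>
	#include <stdint.h>

	struct cid_map { uint32_t bits; unsigned start_cid, max_count; };

	static int acquire(struct cid_map *m, unsigned *cid)
	{
		unsigned rel;

		for (rel = 0; rel < m->max_count; rel++)	/* first zero bit */
			if (!(m->bits & (1u << rel)))
				break;
		if (rel >= m->max_count)
			return -1;			/* no CID available */
		m->bits |= 1u << rel;
		*cid = rel + m->start_cid;		/* relative -> absolute */
		return 0;
	}

	static void release(struct cid_map *m, unsigned cid)
	{
		m->bits &= ~(1u << (cid - m->start_cid));
	}

	int main(void)
	{
		struct cid_map m = { 0, 100, 32 };
		unsigned cid;

		if (!acquire(&m, &cid))
			printf("acquired cid %u\n", cid);	/* 100 */
		release(&m, cid);
		return 0;
	}
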
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.h b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
index 53ad532dc212..17836349a274 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
@@ -54,19 +54,6 @@ struct qed_tid_mem {
};
/**
- * @brief qed_cxt_acquire - Acquire a new cid of a specific protocol type
- *
- * @param p_hwfn
- * @param type
- * @param p_cid
- *
- * @return int
- */
-int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
- enum protocol_type type,
- u32 *p_cid);
-
-/**
* @brief qedo_cid_get_cxt_info - Returns the context info for a specific cid
*
*
@@ -195,14 +182,51 @@ void qed_qm_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
*/
int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+#define QED_CXT_PF_CID (0xff)
+
/**
* @brief qed_cxt_release - Release a cid
*
* @param p_hwfn
* @param cid
*/
-void qed_cxt_release_cid(struct qed_hwfn *p_hwfn,
- u32 cid);
+void qed_cxt_release_cid(struct qed_hwfn *p_hwfn, u32 cid);
+
+/**
+ * @brief _qed_cxt_release_cid - Release a cid belonging to a vf-queue
+ *
+ * @param p_hwfn
+ * @param cid
+ * @param vfid - engine-relative index. QED_CXT_PF_CID if the CID belongs to the PF
+ */
+void _qed_cxt_release_cid(struct qed_hwfn *p_hwfn, u32 cid, u8 vfid);
+
+/**
+ * @brief qed_cxt_acquire - Acquire a new cid of a specific protocol type
+ *
+ * @param p_hwfn
+ * @param type
+ * @param p_cid
+ *
+ * @return int
+ */
+int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
+ enum protocol_type type, u32 *p_cid);
+
+/**
+ * @brief _qed_cxt_acquire_cid - Acquire a new cid of a specific protocol type
+ * for a vf-queue
+ *
+ * @param p_hwfn
+ * @param type
+ * @param p_cid
+ * @param vfid - engine-relative index. QED_CXT_PF_CID if the CID belongs to the PF
+ *
+ * @return int
+ */
+int _qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
+ enum protocol_type type, u32 *p_cid, u8 vfid);
+
int qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn,
enum qed_cxt_elem_type elem_type, u32 iid);
u32 qed_cxt_get_proto_tid_count(struct qed_hwfn *p_hwfn,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
index d883ad5bec6d..eaca4578435d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
@@ -44,6 +44,7 @@
#include "qed_hsi.h"
#include "qed_sp.h"
#include "qed_sriov.h"
+#include "qed_rdma.h"
#ifdef CONFIG_DCB
#include <linux/qed/qed_eth_if.h>
#endif
@@ -191,17 +192,19 @@ static void
qed_dcbx_set_params(struct qed_dcbx_results *p_data,
struct qed_hw_info *p_info,
bool enable,
- bool update,
u8 prio,
u8 tc,
enum dcbx_protocol_type type,
enum qed_pci_personality personality)
{
/* PF update ramrod data */
- p_data->arr[type].update = update;
p_data->arr[type].enable = enable;
p_data->arr[type].priority = prio;
p_data->arr[type].tc = tc;
+ if (enable)
+ p_data->arr[type].update = UPDATE_DCB;
+ else
+ p_data->arr[type].update = DONT_UPDATE_DCB_DSCP;
/* QM reconf data */
if (p_info->personality == personality)
@@ -213,7 +216,6 @@ static void
qed_dcbx_update_app_info(struct qed_dcbx_results *p_data,
struct qed_hwfn *p_hwfn,
bool enable,
- bool update,
u8 prio, u8 tc, enum dcbx_protocol_type type)
{
struct qed_hw_info *p_info = &p_hwfn->hw_info;
@@ -231,7 +233,7 @@ qed_dcbx_update_app_info(struct qed_dcbx_results *p_data,
personality = qed_dcbx_app_update[i].personality;
name = qed_dcbx_app_update[i].name;
- qed_dcbx_set_params(p_data, p_info, enable, update,
+ qed_dcbx_set_params(p_data, p_info, enable,
prio, tc, type, personality);
}
}
@@ -304,22 +306,11 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn,
*/
enable = !(type == DCBX_PROTOCOL_ETH);
- qed_dcbx_update_app_info(p_data, p_hwfn, enable, true,
+ qed_dcbx_update_app_info(p_data, p_hwfn, enable,
priority, tc, type);
}
}
- /* If RoCE-V2 TLV is not detected, driver need to use RoCE app
- * data for RoCE-v2 not the default app data.
- */
- if (!p_data->arr[DCBX_PROTOCOL_ROCE_V2].update &&
- p_data->arr[DCBX_PROTOCOL_ROCE].update) {
- tc = p_data->arr[DCBX_PROTOCOL_ROCE].tc;
- priority = p_data->arr[DCBX_PROTOCOL_ROCE].priority;
- qed_dcbx_update_app_info(p_data, p_hwfn, true, true,
- priority, tc, DCBX_PROTOCOL_ROCE_V2);
- }
-
/* Update ramrod protocol data and hw_info fields
* with default info when corresponding APP TLV's are not detected.
* The enabled field has a different logic for ethernet as only for
@@ -332,8 +323,8 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn,
if (p_data->arr[type].update)
continue;
- enable = !(type == DCBX_PROTOCOL_ETH);
- qed_dcbx_update_app_info(p_data, p_hwfn, enable, true,
+ enable = (type == DCBX_PROTOCOL_ETH) ? false : !!dcbx_version;
+ qed_dcbx_update_app_info(p_data, p_hwfn, enable,
priority, tc, type);
}
@@ -902,10 +893,33 @@ qed_dcbx_mib_update_event(struct qed_hwfn *p_hwfn,
/* update storm FW with negotiation results */
qed_sp_pf_update(p_hwfn);
+
+ /* for roce PFs, we may want to enable/disable DPM
+ * when DCBx change occurs
+ */
+ if (p_hwfn->hw_info.personality ==
+ QED_PCI_ETH_ROCE)
+ qed_roce_dpm_dcbx(p_hwfn, p_ptt);
}
}
qed_dcbx_get_params(p_hwfn, &p_hwfn->p_dcbx_info->get, type);
+
+ if (type == QED_DCBX_OPERATIONAL_MIB) {
+ struct qed_dcbx_results *p_data;
+ u16 val;
+
+ /* Configure in NIG which protocols support EDPM and should
+ * honor PFC.
+ */
+ p_data = &p_hwfn->p_dcbx_info->results;
+ val = (0x1 << p_data->arr[DCBX_PROTOCOL_ROCE].tc) |
+ (0x1 << p_data->arr[DCBX_PROTOCOL_ROCE_V2].tc);
+ val <<= NIG_REG_TX_EDPM_CTRL_TX_EDPM_TC_EN_SHIFT;
+ val |= NIG_REG_TX_EDPM_CTRL_TX_EDPM_EN;
+ qed_wr(p_hwfn, p_ptt, NIG_REG_TX_EDPM_CTRL, val);
+ }
+
qed_dcbx_aen(p_hwfn, type);
return rc;
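
The new operational-MIB handling programs NIG with one enable bit per RoCE traffic class, shifted into the TC_EN field, plus the global EDPM enable. A standalone sketch of the value computation; the shift and enable constants are illustrative assumptions, not the real register layout:

	#include <stdint.h>
	#include <stdio.h>

	#define TC_EN_SHIFT 1
	#define EDPM_EN     0x1u

	int main(void)
	{
		unsigned roce_tc = 3, roce_v2_tc = 3;	/* both mapped to TC 3 */
		uint16_t val = (1u << roce_tc) | (1u << roce_v2_tc);

		val <<= TC_EN_SHIFT;	/* move into the TC_EN field */
		val |= EDPM_EN;		/* global EDPM enable */
		printf("NIG_REG_TX_EDPM_CTRL <- 0x%x\n", val);	/* 0x11 */
		return 0;
	}
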
@@ -923,6 +937,7 @@ int qed_dcbx_info_alloc(struct qed_hwfn *p_hwfn)
void qed_dcbx_info_free(struct qed_hwfn *p_hwfn)
{
kfree(p_hwfn->p_dcbx_info);
+ p_hwfn->p_dcbx_info = NULL;
}
static void qed_dcbx_update_protocol_data(struct protocol_dcb_data *p_data,
@@ -944,17 +959,18 @@ void qed_dcbx_set_pf_update_params(struct qed_dcbx_results *p_src,
p_dest->pf_id = p_src->pf_id;
update_flag = p_src->arr[DCBX_PROTOCOL_FCOE].update;
- p_dest->update_fcoe_dcb_data_flag = update_flag;
+ p_dest->update_fcoe_dcb_data_mode = update_flag;
update_flag = p_src->arr[DCBX_PROTOCOL_ROCE].update;
- p_dest->update_roce_dcb_data_flag = update_flag;
+ p_dest->update_roce_dcb_data_mode = update_flag;
+
update_flag = p_src->arr[DCBX_PROTOCOL_ROCE_V2].update;
- p_dest->update_roce_dcb_data_flag = update_flag;
+ p_dest->update_rroce_dcb_data_mode = update_flag;
update_flag = p_src->arr[DCBX_PROTOCOL_ISCSI].update;
- p_dest->update_iscsi_dcb_data_flag = update_flag;
+ p_dest->update_iscsi_dcb_data_mode = update_flag;
update_flag = p_src->arr[DCBX_PROTOCOL_ETH].update;
- p_dest->update_eth_dcb_data_flag = update_flag;
+ p_dest->update_eth_dcb_data_mode = update_flag;
p_dcb_data = &p_dest->fcoe_dcb_data;
qed_dcbx_update_protocol_data(p_dcb_data, p_src, DCBX_PROTOCOL_FCOE);
@@ -1458,7 +1474,7 @@ static u8 qed_dcbnl_getcap(struct qed_dev *cdev, int capid, u8 *cap)
break;
case DCB_CAP_ATTR_DCBX:
*cap = (DCB_CAP_DCBX_LLD_MANAGED | DCB_CAP_DCBX_VER_CEE |
- DCB_CAP_DCBX_VER_IEEE);
+ DCB_CAP_DCBX_VER_IEEE | DCB_CAP_DCBX_STATIC);
break;
default:
*cap = false;
@@ -1532,6 +1548,8 @@ static u8 qed_dcbnl_getdcbx(struct qed_dev *cdev)
mode |= DCB_CAP_DCBX_VER_IEEE;
if (dcbx_info->operational.cee)
mode |= DCB_CAP_DCBX_VER_CEE;
+ if (dcbx_info->operational.local)
+ mode |= DCB_CAP_DCBX_STATIC;
DP_VERBOSE(hwfn, QED_MSG_DCB, "dcb mode = %d\n", mode);
kfree(dcbx_info);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.h b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h
index 414e26268f3a..5feb90e049e0 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h
@@ -52,7 +52,7 @@ enum qed_mib_read_type {
struct qed_dcbx_app_data {
bool enable; /* DCB enabled */
- bool update; /* Update indication */
+ u8 update; /* Update indication */
u8 priority; /* Priority */
u8 tc; /* Traffic Class */
};
diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c
index a672f6a860dc..03c3cf77aaff 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_debug.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c
@@ -15,13 +15,6 @@
#include "qed_mcp.h"
#include "qed_reg_addr.h"
-/* Chip IDs enum */
-enum chip_ids {
- CHIP_BB_B0,
- CHIP_K2,
- MAX_CHIP_IDS
-};
-
/* Memory groups enum */
enum mem_groups {
MEM_GROUP_PXP_MEM,
@@ -33,7 +26,6 @@ enum mem_groups {
MEM_GROUP_BRB_MEM,
MEM_GROUP_PRS_MEM,
MEM_GROUP_SDM_MEM,
- MEM_GROUP_PBUF,
MEM_GROUP_IOR,
MEM_GROUP_RAM,
MEM_GROUP_BTB_RAM,
@@ -45,6 +37,7 @@ enum mem_groups {
MEM_GROUP_CAU_PI,
MEM_GROUP_CAU_MEM,
MEM_GROUP_PXP_ILT,
+ MEM_GROUP_PBUF,
MEM_GROUP_MULD_MEM,
MEM_GROUP_BTB_MEM,
MEM_GROUP_IGU_MEM,
@@ -66,7 +59,6 @@ static const char * const s_mem_group_names[] = {
"BRB_MEM",
"PRS_MEM",
"SDM_MEM",
- "PBUF",
"IOR",
"RAM",
"BTB_RAM",
@@ -78,6 +70,7 @@ static const char * const s_mem_group_names[] = {
"CAU_PI",
"CAU_MEM",
"PXP_ILT",
+ "PBUF",
"MULD_MEM",
"BTB_MEM",
"IGU_MEM",
@@ -88,48 +81,59 @@ static const char * const s_mem_group_names[] = {
};
/* Idle check conditions */
-static u32 cond4(const u32 *r, const u32 *imm)
+
+static u32 cond5(const u32 *r, const u32 *imm)
{
return ((r[0] & imm[0]) != imm[1]) && ((r[1] & imm[2]) != imm[3]);
}
-static u32 cond6(const u32 *r, const u32 *imm)
+static u32 cond7(const u32 *r, const u32 *imm)
{
return ((r[0] >> imm[0]) & imm[1]) != imm[2];
}
-static u32 cond5(const u32 *r, const u32 *imm)
+static u32 cond14(const u32 *r, const u32 *imm)
+{
+ return (r[0] != imm[0]) && (((r[1] >> imm[1]) & imm[2]) == imm[3]);
+}
+
+static u32 cond6(const u32 *r, const u32 *imm)
{
return (r[0] & imm[0]) != imm[1];
}
-static u32 cond8(const u32 *r, const u32 *imm)
+static u32 cond9(const u32 *r, const u32 *imm)
{
return ((r[0] & imm[0]) >> imm[1]) !=
(((r[0] & imm[2]) >> imm[3]) | ((r[1] & imm[4]) << imm[5]));
}
-static u32 cond9(const u32 *r, const u32 *imm)
+static u32 cond10(const u32 *r, const u32 *imm)
{
return ((r[0] & imm[0]) >> imm[1]) != (r[0] & imm[2]);
}
-static u32 cond1(const u32 *r, const u32 *imm)
+static u32 cond4(const u32 *r, const u32 *imm)
{
return (r[0] & ~imm[0]) != imm[1];
}
static u32 cond0(const u32 *r, const u32 *imm)
{
+ return (r[0] & ~r[1]) != imm[0];
+}
+
+static u32 cond1(const u32 *r, const u32 *imm)
+{
return r[0] != imm[0];
}
-static u32 cond10(const u32 *r, const u32 *imm)
+static u32 cond11(const u32 *r, const u32 *imm)
{
return r[0] != r[1] && r[2] == imm[0];
}
-static u32 cond11(const u32 *r, const u32 *imm)
+static u32 cond12(const u32 *r, const u32 *imm)
{
return r[0] != r[1] && r[2] > imm[0];
}
@@ -139,12 +143,12 @@ static u32 cond3(const u32 *r, const u32 *imm)
return r[0] != r[1];
}
-static u32 cond12(const u32 *r, const u32 *imm)
+static u32 cond13(const u32 *r, const u32 *imm)
{
return r[0] & imm[0];
}
-static u32 cond7(const u32 *r, const u32 *imm)
+static u32 cond8(const u32 *r, const u32 *imm)
{
return r[0] < (r[1] - imm[0]);
}
@@ -169,6 +173,8 @@ static u32(*cond_arr[]) (const u32 *r, const u32 *imm) = {
cond10,
cond11,
cond12,
+ cond13,
+ cond14,
};
/******************************* Data Types **********************************/
@@ -181,11 +187,6 @@ enum platform_ids {
MAX_PLATFORM_IDS
};
-struct dbg_array {
- const u32 *ptr;
- u32 size_in_dwords;
-};
-
struct chip_platform_defs {
u8 num_ports;
u8 num_pfs;
@@ -204,7 +205,9 @@ struct platform_defs {
u32 delay_factor;
};
-/* Storm constant definitions */
+/* Storm constant definitions.
+ * Addresses are in bytes, sizes are in quad-regs.
+ */
struct storm_defs {
char letter;
enum block_id block_id;
@@ -218,13 +221,13 @@ struct storm_defs {
u32 sem_sync_dbg_empty_addr;
u32 sem_slow_dbg_empty_addr;
u32 cm_ctx_wr_addr;
- u32 cm_conn_ag_ctx_lid_size; /* In quad-regs */
+ u32 cm_conn_ag_ctx_lid_size;
u32 cm_conn_ag_ctx_rd_addr;
- u32 cm_conn_st_ctx_lid_size; /* In quad-regs */
+ u32 cm_conn_st_ctx_lid_size;
u32 cm_conn_st_ctx_rd_addr;
- u32 cm_task_ag_ctx_lid_size; /* In quad-regs */
+ u32 cm_task_ag_ctx_lid_size;
u32 cm_task_ag_ctx_rd_addr;
- u32 cm_task_st_ctx_lid_size; /* In quad-regs */
+ u32 cm_task_st_ctx_lid_size;
u32 cm_task_st_ctx_rd_addr;
};
@@ -233,17 +236,23 @@ struct block_defs {
const char *name;
bool has_dbg_bus[MAX_CHIP_IDS];
bool associated_to_storm;
- u32 storm_id; /* Valid only if associated_to_storm is true */
+
+ /* Valid only if associated_to_storm is true */
+ u32 storm_id;
enum dbg_bus_clients dbg_client_id[MAX_CHIP_IDS];
u32 dbg_select_addr;
- u32 dbg_cycle_enable_addr;
+ u32 dbg_enable_addr;
u32 dbg_shift_addr;
u32 dbg_force_valid_addr;
u32 dbg_force_frame_addr;
bool has_reset_bit;
- bool unreset; /* If true, the block is taken out of reset before dump */
+
+ /* If true, block is taken out of reset before dump */
+ bool unreset;
enum dbg_reset_regs reset_reg;
- u8 reset_bit_offset; /* Bit offset in reset register */
+
+ /* Bit offset in reset register */
+ u8 reset_bit_offset;
};
/* Reset register definitions */
@@ -262,12 +271,13 @@ struct grc_param_defs {
u32 crash_preset_val;
};
+/* Address is in 128b units. Width is in bits. */
struct rss_mem_defs {
const char *mem_name;
const char *type_name;
- u32 addr; /* In 128b units */
+ u32 addr;
u32 num_entries[MAX_CHIP_IDS];
- u32 entry_width[MAX_CHIP_IDS]; /* In bits */
+ u32 entry_width[MAX_CHIP_IDS];
};
struct vfc_ram_defs {
@@ -289,10 +299,20 @@ struct big_ram_defs {
struct phy_defs {
const char *phy_name;
+
+ /* PHY base GRC address */
u32 base_addr;
+
+ /* Relative address of indirect TBUS address register (bits 0..7) */
u32 tbus_addr_lo_addr;
+
+ /* Relative address of indirect TBUS address register (bits 8..10) */
u32 tbus_addr_hi_addr;
+
+ /* Relative address of indirect TBUS data register (bits 0..7) */
u32 tbus_data_lo_addr;
+
+ /* Relative address of indirect TBUS data register (bits 8..11) */
u32 tbus_data_hi_addr;
};
@@ -300,9 +320,11 @@ struct phy_defs {
#define MAX_LCIDS 320
#define MAX_LTIDS 320
+
#define NUM_IOR_SETS 2
#define IORS_PER_SET 176
#define IOR_SET_OFFSET(set_id) ((set_id) * 256)
+
#define BYTES_IN_DWORD sizeof(u32)
/* In the macros below, size and offset are specified in bits */
@@ -315,6 +337,7 @@ struct phy_defs {
#define FIELD_BIT_MASK(type, field) \
(((1 << FIELD_BIT_SIZE(type, field)) - 1) << \
FIELD_DWORD_SHIFT(type, field))
+
#define SET_VAR_FIELD(var, type, field, val) \
do { \
var[FIELD_DWORD_OFFSET(type, field)] &= \
@@ -322,31 +345,51 @@ struct phy_defs {
var[FIELD_DWORD_OFFSET(type, field)] |= \
(val) << FIELD_DWORD_SHIFT(type, field); \
} while (0)
+
#define ARR_REG_WR(dev, ptt, addr, arr, arr_size) \
do { \
for (i = 0; i < (arr_size); i++) \
qed_wr(dev, ptt, addr, (arr)[i]); \
} while (0)
+
#define ARR_REG_RD(dev, ptt, addr, arr, arr_size) \
do { \
for (i = 0; i < (arr_size); i++) \
(arr)[i] = qed_rd(dev, ptt, addr); \
} while (0)
+#ifndef DWORDS_TO_BYTES
#define DWORDS_TO_BYTES(dwords) ((dwords) * BYTES_IN_DWORD)
+#endif
+#ifndef BYTES_TO_DWORDS
#define BYTES_TO_DWORDS(bytes) ((bytes) / BYTES_IN_DWORD)
+#endif
+
+/* extra lines include a signature line + optional latency events line */
+#ifndef NUM_DBG_LINES
+#define NUM_EXTRA_DBG_LINES(block_desc) \
+ (1 + ((block_desc)->has_latency_events ? 1 : 0))
+#define NUM_DBG_LINES(block_desc) \
+ ((block_desc)->num_of_lines + NUM_EXTRA_DBG_LINES(block_desc))
+#endif
+
#define RAM_LINES_TO_DWORDS(lines) ((lines) * 2)
#define RAM_LINES_TO_BYTES(lines) \
DWORDS_TO_BYTES(RAM_LINES_TO_DWORDS(lines))
+
#define REG_DUMP_LEN_SHIFT 24
#define MEM_DUMP_ENTRY_SIZE_DWORDS \
BYTES_TO_DWORDS(sizeof(struct dbg_dump_mem))
+
#define IDLE_CHK_RULE_SIZE_DWORDS \
BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_rule))
+
#define IDLE_CHK_RESULT_HDR_DWORDS \
BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_result_hdr))
+
#define IDLE_CHK_RESULT_REG_HDR_DWORDS \
BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_result_reg_hdr))
+
#define IDLE_CHK_MAX_ENTRIES_SIZE 32
/* The sizes and offsets below are specified in bits */
@@ -363,62 +406,92 @@ struct phy_defs {
#define VFC_RAM_ADDR_ROW_OFFSET 2
#define VFC_RAM_ADDR_ROW_SIZE 10
#define VFC_RAM_RESP_STRUCT_SIZE 256
+
#define VFC_CAM_CMD_DWORDS CEIL_DWORDS(VFC_CAM_CMD_STRUCT_SIZE)
#define VFC_CAM_ADDR_DWORDS CEIL_DWORDS(VFC_CAM_ADDR_STRUCT_SIZE)
#define VFC_CAM_RESP_DWORDS CEIL_DWORDS(VFC_CAM_RESP_STRUCT_SIZE)
#define VFC_RAM_CMD_DWORDS VFC_CAM_CMD_DWORDS
#define VFC_RAM_ADDR_DWORDS CEIL_DWORDS(VFC_RAM_ADDR_STRUCT_SIZE)
#define VFC_RAM_RESP_DWORDS CEIL_DWORDS(VFC_RAM_RESP_STRUCT_SIZE)
+
#define NUM_VFC_RAM_TYPES 4
+
#define VFC_CAM_NUM_ROWS 512
+
#define VFC_OPCODE_CAM_RD 14
#define VFC_OPCODE_RAM_RD 0
+
#define NUM_RSS_MEM_TYPES 5
+
#define NUM_BIG_RAM_TYPES 3
#define BIG_RAM_BLOCK_SIZE_BYTES 128
#define BIG_RAM_BLOCK_SIZE_DWORDS \
BYTES_TO_DWORDS(BIG_RAM_BLOCK_SIZE_BYTES)
+
#define NUM_PHY_TBUS_ADDRESSES 2048
#define PHY_DUMP_SIZE_DWORDS (NUM_PHY_TBUS_ADDRESSES / 2)
+
#define RESET_REG_UNRESET_OFFSET 4
+
#define STALL_DELAY_MS 500
+
#define STATIC_DEBUG_LINE_DWORDS 9
-#define NUM_DBG_BUS_LINES 256
+
#define NUM_COMMON_GLOBAL_PARAMS 8
+
#define FW_IMG_MAIN 1
-#define REG_FIFO_DEPTH_ELEMENTS 32
+
+#ifndef REG_FIFO_ELEMENT_DWORDS
#define REG_FIFO_ELEMENT_DWORDS 2
+#endif
+#define REG_FIFO_DEPTH_ELEMENTS 32
#define REG_FIFO_DEPTH_DWORDS \
(REG_FIFO_ELEMENT_DWORDS * REG_FIFO_DEPTH_ELEMENTS)
-#define IGU_FIFO_DEPTH_ELEMENTS 64
+
+#ifndef IGU_FIFO_ELEMENT_DWORDS
#define IGU_FIFO_ELEMENT_DWORDS 4
+#endif
+#define IGU_FIFO_DEPTH_ELEMENTS 64
#define IGU_FIFO_DEPTH_DWORDS \
(IGU_FIFO_ELEMENT_DWORDS * IGU_FIFO_DEPTH_ELEMENTS)
-#define PROTECTION_OVERRIDE_DEPTH_ELEMENTS 20
+
+#ifndef PROTECTION_OVERRIDE_ELEMENT_DWORDS
#define PROTECTION_OVERRIDE_ELEMENT_DWORDS 2
+#endif
+#define PROTECTION_OVERRIDE_DEPTH_ELEMENTS 20
#define PROTECTION_OVERRIDE_DEPTH_DWORDS \
(PROTECTION_OVERRIDE_DEPTH_ELEMENTS * \
PROTECTION_OVERRIDE_ELEMENT_DWORDS)
+
#define MCP_SPAD_TRACE_OFFSIZE_ADDR \
(MCP_REG_SCRATCH + \
offsetof(struct static_init, sections[SPAD_SECTION_TRACE]))
-#define MCP_TRACE_META_IMAGE_SIGNATURE 0x669955aa
+
#define EMPTY_FW_VERSION_STR "???_???_???_???"
#define EMPTY_FW_IMAGE_STR "???????????????"
/***************************** Constant Arrays *******************************/
+struct dbg_array {
+ const u32 *ptr;
+ u32 size_in_dwords;
+};
+
/* Debug arrays */
-static struct dbg_array s_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { {0} };
+static struct dbg_array s_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { {NULL} };
/* Chip constant definitions array */
static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = {
- { "bb_b0",
- { {MAX_NUM_PORTS_BB, MAX_NUM_PFS_BB, MAX_NUM_VFS_BB}, {0, 0, 0},
- {0, 0, 0}, {0, 0, 0} } },
- { "k2",
- { {MAX_NUM_PORTS_K2, MAX_NUM_PFS_K2, MAX_NUM_VFS_K2}, {0, 0, 0},
- {0, 0, 0}, {0, 0, 0} } }
+ { "bb",
+ {{MAX_NUM_PORTS_BB, MAX_NUM_PFS_BB, MAX_NUM_VFS_BB},
+ {0, 0, 0},
+ {0, 0, 0},
+ {0, 0, 0} } },
+ { "ah",
+ {{MAX_NUM_PORTS_K2, MAX_NUM_PFS_K2, MAX_NUM_VFS_K2},
+ {0, 0, 0},
+ {0, 0, 0},
+ {0, 0, 0} } }
};
/* Storm constant definitions array */
@@ -427,69 +500,74 @@ static struct storm_defs s_storm_defs[] = {
{'T', BLOCK_TSEM,
{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT}, true,
TSEM_REG_FAST_MEMORY,
- TSEM_REG_DBG_FRAME_MODE, TSEM_REG_SLOW_DBG_ACTIVE,
- TSEM_REG_SLOW_DBG_MODE, TSEM_REG_DBG_MODE1_CFG,
- TSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_SLOW_DBG_EMPTY,
+ TSEM_REG_DBG_FRAME_MODE_BB_K2, TSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
+ TSEM_REG_SLOW_DBG_MODE_BB_K2, TSEM_REG_DBG_MODE1_CFG_BB_K2,
+ TSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_SLOW_DBG_EMPTY_BB_K2,
TCM_REG_CTX_RBC_ACCS,
4, TCM_REG_AGG_CON_CTX,
16, TCM_REG_SM_CON_CTX,
2, TCM_REG_AGG_TASK_CTX,
4, TCM_REG_SM_TASK_CTX},
+
/* Mstorm */
{'M', BLOCK_MSEM,
{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM}, false,
MSEM_REG_FAST_MEMORY,
- MSEM_REG_DBG_FRAME_MODE, MSEM_REG_SLOW_DBG_ACTIVE,
- MSEM_REG_SLOW_DBG_MODE, MSEM_REG_DBG_MODE1_CFG,
- MSEM_REG_SYNC_DBG_EMPTY, MSEM_REG_SLOW_DBG_EMPTY,
+ MSEM_REG_DBG_FRAME_MODE_BB_K2, MSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
+ MSEM_REG_SLOW_DBG_MODE_BB_K2, MSEM_REG_DBG_MODE1_CFG_BB_K2,
+ MSEM_REG_SYNC_DBG_EMPTY, MSEM_REG_SLOW_DBG_EMPTY_BB_K2,
MCM_REG_CTX_RBC_ACCS,
1, MCM_REG_AGG_CON_CTX,
10, MCM_REG_SM_CON_CTX,
2, MCM_REG_AGG_TASK_CTX,
7, MCM_REG_SM_TASK_CTX},
+
/* Ustorm */
{'U', BLOCK_USEM,
{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU}, false,
USEM_REG_FAST_MEMORY,
- USEM_REG_DBG_FRAME_MODE, USEM_REG_SLOW_DBG_ACTIVE,
- USEM_REG_SLOW_DBG_MODE, USEM_REG_DBG_MODE1_CFG,
- USEM_REG_SYNC_DBG_EMPTY, USEM_REG_SLOW_DBG_EMPTY,
+ USEM_REG_DBG_FRAME_MODE_BB_K2, USEM_REG_SLOW_DBG_ACTIVE_BB_K2,
+ USEM_REG_SLOW_DBG_MODE_BB_K2, USEM_REG_DBG_MODE1_CFG_BB_K2,
+ USEM_REG_SYNC_DBG_EMPTY, USEM_REG_SLOW_DBG_EMPTY_BB_K2,
UCM_REG_CTX_RBC_ACCS,
2, UCM_REG_AGG_CON_CTX,
13, UCM_REG_SM_CON_CTX,
3, UCM_REG_AGG_TASK_CTX,
3, UCM_REG_SM_TASK_CTX},
+
/* Xstorm */
{'X', BLOCK_XSEM,
{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX}, false,
XSEM_REG_FAST_MEMORY,
- XSEM_REG_DBG_FRAME_MODE, XSEM_REG_SLOW_DBG_ACTIVE,
- XSEM_REG_SLOW_DBG_MODE, XSEM_REG_DBG_MODE1_CFG,
- XSEM_REG_SYNC_DBG_EMPTY, XSEM_REG_SLOW_DBG_EMPTY,
+ XSEM_REG_DBG_FRAME_MODE_BB_K2, XSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
+ XSEM_REG_SLOW_DBG_MODE_BB_K2, XSEM_REG_DBG_MODE1_CFG_BB_K2,
+ XSEM_REG_SYNC_DBG_EMPTY, XSEM_REG_SLOW_DBG_EMPTY_BB_K2,
XCM_REG_CTX_RBC_ACCS,
9, XCM_REG_AGG_CON_CTX,
15, XCM_REG_SM_CON_CTX,
0, 0,
0, 0},
+
/* Ystorm */
{'Y', BLOCK_YSEM,
{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY}, false,
YSEM_REG_FAST_MEMORY,
- YSEM_REG_DBG_FRAME_MODE, YSEM_REG_SLOW_DBG_ACTIVE,
- YSEM_REG_SLOW_DBG_MODE, YSEM_REG_DBG_MODE1_CFG,
- YSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_SLOW_DBG_EMPTY,
+ YSEM_REG_DBG_FRAME_MODE_BB_K2, YSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
+ YSEM_REG_SLOW_DBG_MODE_BB_K2, YSEM_REG_DBG_MODE1_CFG_BB_K2,
+ YSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_SLOW_DBG_EMPTY_BB_K2,
YCM_REG_CTX_RBC_ACCS,
2, YCM_REG_AGG_CON_CTX,
3, YCM_REG_SM_CON_CTX,
2, YCM_REG_AGG_TASK_CTX,
12, YCM_REG_SM_TASK_CTX},
+
/* Pstorm */
{'P', BLOCK_PSEM,
{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS}, true,
PSEM_REG_FAST_MEMORY,
- PSEM_REG_DBG_FRAME_MODE, PSEM_REG_SLOW_DBG_ACTIVE,
- PSEM_REG_SLOW_DBG_MODE, PSEM_REG_DBG_MODE1_CFG,
- PSEM_REG_SYNC_DBG_EMPTY, PSEM_REG_SLOW_DBG_EMPTY,
+ PSEM_REG_DBG_FRAME_MODE_BB_K2, PSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
+ PSEM_REG_SLOW_DBG_MODE_BB_K2, PSEM_REG_DBG_MODE1_CFG_BB_K2,
+ PSEM_REG_SYNC_DBG_EMPTY, PSEM_REG_SLOW_DBG_EMPTY_BB_K2,
PCM_REG_CTX_RBC_ACCS,
0, 0,
10, PCM_REG_SM_CON_CTX,
@@ -498,6 +576,7 @@ static struct storm_defs s_storm_defs[] = {
};
/* Block definitions array */
+
static struct block_defs block_grc_defs = {
"grc",
{true, true}, false, 0,
@@ -587,9 +666,11 @@ static struct block_defs block_pcie_defs = {
"pcie",
{false, true}, false, 0,
{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
- PCIE_REG_DBG_COMMON_SELECT, PCIE_REG_DBG_COMMON_DWORD_ENABLE,
- PCIE_REG_DBG_COMMON_SHIFT, PCIE_REG_DBG_COMMON_FORCE_VALID,
- PCIE_REG_DBG_COMMON_FORCE_FRAME,
+ PCIE_REG_DBG_COMMON_SELECT_K2,
+ PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2,
+ PCIE_REG_DBG_COMMON_SHIFT_K2,
+ PCIE_REG_DBG_COMMON_FORCE_VALID_K2,
+ PCIE_REG_DBG_COMMON_FORCE_FRAME_K2,
false, false, MAX_DBG_RESET_REGS, 0
};
@@ -691,9 +772,9 @@ static struct block_defs block_pglcs_defs = {
"pglcs",
{false, true}, false, 0,
{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
- PGLCS_REG_DBG_SELECT, PGLCS_REG_DBG_DWORD_ENABLE,
- PGLCS_REG_DBG_SHIFT, PGLCS_REG_DBG_FORCE_VALID,
- PGLCS_REG_DBG_FORCE_FRAME,
+ PGLCS_REG_DBG_SELECT_K2, PGLCS_REG_DBG_DWORD_ENABLE_K2,
+ PGLCS_REG_DBG_SHIFT_K2, PGLCS_REG_DBG_FORCE_VALID_K2,
+ PGLCS_REG_DBG_FORCE_FRAME_K2,
true, false, DBG_RESET_REG_MISCS_PL_HV, 2
};
@@ -991,10 +1072,11 @@ static struct block_defs block_yuld_defs = {
"yuld",
{true, true}, false, 0,
{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
- YULD_REG_DBG_SELECT, YULD_REG_DBG_DWORD_ENABLE,
- YULD_REG_DBG_SHIFT, YULD_REG_DBG_FORCE_VALID,
- YULD_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 15
+ YULD_REG_DBG_SELECT_BB_K2, YULD_REG_DBG_DWORD_ENABLE_BB_K2,
+ YULD_REG_DBG_SHIFT_BB_K2, YULD_REG_DBG_FORCE_VALID_BB_K2,
+ YULD_REG_DBG_FORCE_FRAME_BB_K2,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2,
+ 15
};
static struct block_defs block_xyld_defs = {
@@ -1143,9 +1225,9 @@ static struct block_defs block_umac_defs = {
"umac",
{false, true}, false, 0,
{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ},
- UMAC_REG_DBG_SELECT, UMAC_REG_DBG_DWORD_ENABLE,
- UMAC_REG_DBG_SHIFT, UMAC_REG_DBG_FORCE_VALID,
- UMAC_REG_DBG_FORCE_FRAME,
+ UMAC_REG_DBG_SELECT_K2, UMAC_REG_DBG_DWORD_ENABLE_K2,
+ UMAC_REG_DBG_SHIFT_K2, UMAC_REG_DBG_FORCE_VALID_K2,
+ UMAC_REG_DBG_FORCE_FRAME_K2,
true, false, DBG_RESET_REG_MISCS_PL_HV, 6
};
@@ -1177,9 +1259,9 @@ static struct block_defs block_wol_defs = {
"wol",
{false, true}, false, 0,
{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ},
- WOL_REG_DBG_SELECT, WOL_REG_DBG_DWORD_ENABLE,
- WOL_REG_DBG_SHIFT, WOL_REG_DBG_FORCE_VALID,
- WOL_REG_DBG_FORCE_FRAME,
+ WOL_REG_DBG_SELECT_K2, WOL_REG_DBG_DWORD_ENABLE_K2,
+ WOL_REG_DBG_SHIFT_K2, WOL_REG_DBG_FORCE_VALID_K2,
+ WOL_REG_DBG_FORCE_FRAME_K2,
true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 7
};
@@ -1187,9 +1269,9 @@ static struct block_defs block_bmbn_defs = {
"bmbn",
{false, true}, false, 0,
{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCB},
- BMBN_REG_DBG_SELECT, BMBN_REG_DBG_DWORD_ENABLE,
- BMBN_REG_DBG_SHIFT, BMBN_REG_DBG_FORCE_VALID,
- BMBN_REG_DBG_FORCE_FRAME,
+ BMBN_REG_DBG_SELECT_K2, BMBN_REG_DBG_DWORD_ENABLE_K2,
+ BMBN_REG_DBG_SHIFT_K2, BMBN_REG_DBG_FORCE_VALID_K2,
+ BMBN_REG_DBG_FORCE_FRAME_K2,
false, false, MAX_DBG_RESET_REGS, 0
};
@@ -1204,9 +1286,9 @@ static struct block_defs block_nwm_defs = {
"nwm",
{false, true}, false, 0,
{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW},
- NWM_REG_DBG_SELECT, NWM_REG_DBG_DWORD_ENABLE,
- NWM_REG_DBG_SHIFT, NWM_REG_DBG_FORCE_VALID,
- NWM_REG_DBG_FORCE_FRAME,
+ NWM_REG_DBG_SELECT_K2, NWM_REG_DBG_DWORD_ENABLE_K2,
+ NWM_REG_DBG_SHIFT_K2, NWM_REG_DBG_FORCE_VALID_K2,
+ NWM_REG_DBG_FORCE_FRAME_K2,
true, false, DBG_RESET_REG_MISCS_PL_HV_2, 0
};
@@ -1214,9 +1296,9 @@ static struct block_defs block_nws_defs = {
"nws",
{false, true}, false, 0,
{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW},
- NWS_REG_DBG_SELECT, NWS_REG_DBG_DWORD_ENABLE,
- NWS_REG_DBG_SHIFT, NWS_REG_DBG_FORCE_VALID,
- NWS_REG_DBG_FORCE_FRAME,
+ NWS_REG_DBG_SELECT_K2, NWS_REG_DBG_DWORD_ENABLE_K2,
+ NWS_REG_DBG_SHIFT_K2, NWS_REG_DBG_FORCE_VALID_K2,
+ NWS_REG_DBG_FORCE_FRAME_K2,
true, false, DBG_RESET_REG_MISCS_PL_HV, 12
};
@@ -1224,9 +1306,9 @@ static struct block_defs block_ms_defs = {
"ms",
{false, true}, false, 0,
{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ},
- MS_REG_DBG_SELECT, MS_REG_DBG_DWORD_ENABLE,
- MS_REG_DBG_SHIFT, MS_REG_DBG_FORCE_VALID,
- MS_REG_DBG_FORCE_FRAME,
+ MS_REG_DBG_SELECT_K2, MS_REG_DBG_DWORD_ENABLE_K2,
+ MS_REG_DBG_SHIFT_K2, MS_REG_DBG_FORCE_VALID_K2,
+ MS_REG_DBG_FORCE_FRAME_K2,
true, false, DBG_RESET_REG_MISCS_PL_HV, 13
};
@@ -1234,9 +1316,11 @@ static struct block_defs block_phy_pcie_defs = {
"phy_pcie",
{false, true}, false, 0,
{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
- PCIE_REG_DBG_COMMON_SELECT, PCIE_REG_DBG_COMMON_DWORD_ENABLE,
- PCIE_REG_DBG_COMMON_SHIFT, PCIE_REG_DBG_COMMON_FORCE_VALID,
- PCIE_REG_DBG_COMMON_FORCE_FRAME,
+ PCIE_REG_DBG_COMMON_SELECT_K2,
+ PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2,
+ PCIE_REG_DBG_COMMON_SHIFT_K2,
+ PCIE_REG_DBG_COMMON_FORCE_VALID_K2,
+ PCIE_REG_DBG_COMMON_FORCE_FRAME_K2,
false, false, MAX_DBG_RESET_REGS, 0
};
@@ -1261,6 +1345,13 @@ static struct block_defs block_rgfs_defs = {
false, false, MAX_DBG_RESET_REGS, 0
};
+static struct block_defs block_rgsrc_defs = {
+ "rgsrc", {false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ 0, 0, 0, 0, 0,
+ false, false, MAX_DBG_RESET_REGS, 0
+};
+
static struct block_defs block_tgfs_defs = {
"tgfs", {false, false}, false, 0,
{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
@@ -1268,6 +1359,13 @@ static struct block_defs block_tgfs_defs = {
false, false, MAX_DBG_RESET_REGS, 0
};
+static struct block_defs block_tgsrc_defs = {
+ "tgsrc", {false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ 0, 0, 0, 0, 0,
+ false, false, MAX_DBG_RESET_REGS, 0
+};
+
static struct block_defs block_ptld_defs = {
"ptld", {false, false}, false, 0,
{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
@@ -1350,6 +1448,8 @@ static struct block_defs *s_block_defs[MAX_BLOCK_ID] = {
&block_muld_defs,
&block_yuld_defs,
&block_xyld_defs,
+ &block_ptld_defs,
+ &block_ypld_defs,
&block_prm_defs,
&block_pbf_pb1_defs,
&block_pbf_pb2_defs,
@@ -1363,6 +1463,10 @@ static struct block_defs *s_block_defs[MAX_BLOCK_ID] = {
&block_tcfc_defs,
&block_igu_defs,
&block_cau_defs,
+ &block_rgfs_defs,
+ &block_rgsrc_defs,
+ &block_tgfs_defs,
+ &block_tgsrc_defs,
&block_umac_defs,
&block_xmac_defs,
&block_dbg_defs,
@@ -1376,10 +1480,6 @@ static struct block_defs *s_block_defs[MAX_BLOCK_ID] = {
&block_phy_pcie_defs,
&block_led_defs,
&block_avs_wrap_defs,
- &block_rgfs_defs,
- &block_tgfs_defs,
- &block_ptld_defs,
- &block_ypld_defs,
&block_misc_aeu_defs,
&block_bar0_map_defs,
};
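/* Editorial note, not part of this patch: s_block_defs is indexed directly
 * by enum block_id (see the s_block_defs[block_id] uses in the reset-state
 * code below), which is why the rgfs/rgsrc/tgfs/tgsrc and ptld/ypld entries
 * above must sit at their enum positions. A defensive lookup would be:
 */
static struct block_defs *dbg_block_lookup_sketch(enum block_id id)
{
	return id < MAX_BLOCK_ID ? s_block_defs[id] : NULL;
}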
@@ -1392,66 +1492,151 @@ static struct platform_defs s_platform_defs[] = {
};
static struct grc_param_defs s_grc_param_defs[] = {
- {{1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_TSTORM */
- {{1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_MSTORM */
- {{1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_USTORM */
- {{1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_XSTORM */
- {{1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_YSTORM */
- {{1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_PSTORM */
- {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_REGS */
- {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_RAM */
- {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_PBUF */
- {{0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_IOR */
- {{0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_VFC */
- {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_CM_CTX */
- {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_ILT */
- {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_RSS */
- {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_CAU */
- {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_QM */
- {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_MCP */
- {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_RESERVED */
- {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_CFC */
- {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_IGU */
- {{0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_BRB */
- {{0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_BTB */
- {{0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_BMB */
- {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_NIG */
- {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_MULD */
- {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_PRS */
- {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_DMAE */
- {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_TM */
- {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_SDM */
- {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_DIF */
- {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_STATIC */
- {{0, 0}, 0, 1, false, 0, 0}, /* DBG_GRC_PARAM_UNSTALL */
+ /* DBG_GRC_PARAM_DUMP_TSTORM */
+ {{1, 1}, 0, 1, false, 1, 1},
+
+ /* DBG_GRC_PARAM_DUMP_MSTORM */
+ {{1, 1}, 0, 1, false, 1, 1},
+
+ /* DBG_GRC_PARAM_DUMP_USTORM */
+ {{1, 1}, 0, 1, false, 1, 1},
+
+ /* DBG_GRC_PARAM_DUMP_XSTORM */
+ {{1, 1}, 0, 1, false, 1, 1},
+
+ /* DBG_GRC_PARAM_DUMP_YSTORM */
+ {{1, 1}, 0, 1, false, 1, 1},
+
+ /* DBG_GRC_PARAM_DUMP_PSTORM */
+ {{1, 1}, 0, 1, false, 1, 1},
+
+ /* DBG_GRC_PARAM_DUMP_REGS */
+ {{1, 1}, 0, 1, false, 0, 1},
+
+ /* DBG_GRC_PARAM_DUMP_RAM */
+ {{1, 1}, 0, 1, false, 0, 1},
+
+ /* DBG_GRC_PARAM_DUMP_PBUF */
+ {{1, 1}, 0, 1, false, 0, 1},
+
+ /* DBG_GRC_PARAM_DUMP_IOR */
+ {{0, 0}, 0, 1, false, 0, 1},
+
+ /* DBG_GRC_PARAM_DUMP_VFC */
+ {{0, 0}, 0, 1, false, 0, 1},
+
+ /* DBG_GRC_PARAM_DUMP_CM_CTX */
+ {{1, 1}, 0, 1, false, 0, 1},
+
+ /* DBG_GRC_PARAM_DUMP_ILT */
+ {{1, 1}, 0, 1, false, 0, 1},
+
+ /* DBG_GRC_PARAM_DUMP_RSS */
+ {{1, 1}, 0, 1, false, 0, 1},
+
+ /* DBG_GRC_PARAM_DUMP_CAU */
+ {{1, 1}, 0, 1, false, 0, 1},
+
+ /* DBG_GRC_PARAM_DUMP_QM */
+ {{1, 1}, 0, 1, false, 0, 1},
+
+ /* DBG_GRC_PARAM_DUMP_MCP */
+ {{1, 1}, 0, 1, false, 0, 1},
+
+ /* DBG_GRC_PARAM_RESERVED */
+ {{1, 1}, 0, 1, false, 0, 1},
+
+ /* DBG_GRC_PARAM_DUMP_CFC */
+ {{1, 1}, 0, 1, false, 0, 1},
+
+ /* DBG_GRC_PARAM_DUMP_IGU */
+ {{1, 1}, 0, 1, false, 0, 1},
+
+ /* DBG_GRC_PARAM_DUMP_BRB */
+ {{0, 0}, 0, 1, false, 0, 1},
+
+ /* DBG_GRC_PARAM_DUMP_BTB */
+ {{0, 0}, 0, 1, false, 0, 1},
+
+ /* DBG_GRC_PARAM_DUMP_BMB */
+ {{0, 0}, 0, 1, false, 0, 1},
+
+ /* DBG_GRC_PARAM_DUMP_NIG */
+ {{1, 1}, 0, 1, false, 0, 1},
+
+ /* DBG_GRC_PARAM_DUMP_MULD */
+ {{1, 1}, 0, 1, false, 0, 1},
+
+ /* DBG_GRC_PARAM_DUMP_PRS */
+ {{1, 1}, 0, 1, false, 0, 1},
+
+ /* DBG_GRC_PARAM_DUMP_DMAE */
+ {{1, 1}, 0, 1, false, 0, 1},
+
+ /* DBG_GRC_PARAM_DUMP_TM */
+ {{1, 1}, 0, 1, false, 0, 1},
+
+ /* DBG_GRC_PARAM_DUMP_SDM */
+ {{1, 1}, 0, 1, false, 0, 1},
+
+ /* DBG_GRC_PARAM_DUMP_DIF */
+ {{1, 1}, 0, 1, false, 0, 1},
+
+ /* DBG_GRC_PARAM_DUMP_STATIC */
+ {{1, 1}, 0, 1, false, 0, 1},
+
+ /* DBG_GRC_PARAM_UNSTALL */
+ {{0, 0}, 0, 1, false, 0, 0},
+
+ /* DBG_GRC_PARAM_NUM_LCIDS */
{{MAX_LCIDS, MAX_LCIDS}, 1, MAX_LCIDS, false, MAX_LCIDS,
- MAX_LCIDS}, /* DBG_GRC_PARAM_NUM_LCIDS */
+ MAX_LCIDS},
+
+ /* DBG_GRC_PARAM_NUM_LTIDS */
{{MAX_LTIDS, MAX_LTIDS}, 1, MAX_LTIDS, false, MAX_LTIDS,
- MAX_LTIDS}, /* DBG_GRC_PARAM_NUM_LTIDS */
- {{0, 0}, 0, 1, true, 0, 0}, /* DBG_GRC_PARAM_EXCLUDE_ALL */
- {{0, 0}, 0, 1, true, 0, 0}, /* DBG_GRC_PARAM_CRASH */
- {{0, 0}, 0, 1, false, 1, 0}, /* DBG_GRC_PARAM_PARITY_SAFE */
- {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_CM */
- {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_PHY */
- {{0, 0}, 0, 1, false, 0, 0}, /* DBG_GRC_PARAM_NO_MCP */
- {{0, 0}, 0, 1, false, 0, 0} /* DBG_GRC_PARAM_NO_FW_VER */
+ MAX_LTIDS},
+
+ /* DBG_GRC_PARAM_EXCLUDE_ALL */
+ {{0, 0}, 0, 1, true, 0, 0},
+
+ /* DBG_GRC_PARAM_CRASH */
+ {{0, 0}, 0, 1, true, 0, 0},
+
+ /* DBG_GRC_PARAM_PARITY_SAFE */
+ {{0, 0}, 0, 1, false, 1, 0},
+
+ /* DBG_GRC_PARAM_DUMP_CM */
+ {{1, 1}, 0, 1, false, 0, 1},
+
+ /* DBG_GRC_PARAM_DUMP_PHY */
+ {{1, 1}, 0, 1, false, 0, 1},
+
+ /* DBG_GRC_PARAM_NO_MCP */
+ {{0, 0}, 0, 1, false, 0, 0},
+
+ /* DBG_GRC_PARAM_NO_FW_VER */
+ {{0, 0}, 0, 1, false, 0, 0}
};
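/* Editorial sketch, not part of this patch: assuming the columns of
 * s_grc_param_defs are {default_val[chip], min, max, is_preset,
 * exclude_all_preset_val, crash_preset_val}, the per-chip defaults would be
 * applied roughly as below (MAX_DBG_GRC_PARAMS and grc.param_val are
 * assumed names):
 */
static void dbg_grc_defaults_sketch(struct qed_hwfn *p_hwfn)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u32 i;

	for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
		dev_data->grc.param_val[i] =
		    s_grc_param_defs[i].default_val[dev_data->chip_id];
}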
static struct rss_mem_defs s_rss_mem_defs[] = {
{ "rss_mem_cid", "rss_cid", 0,
{256, 320},
{32, 32} },
+
{ "rss_mem_key_msb", "rss_key", 1024,
{128, 208},
{256, 256} },
+
{ "rss_mem_key_lsb", "rss_key", 2048,
{128, 208},
{64, 64} },
+
{ "rss_mem_info", "rss_info", 3072,
{128, 208},
{16, 16} },
+
{ "rss_mem_ind", "rss_ind", 4096,
- {(128 * 128), (128 * 208)},
+ {16384, 26624},
{16, 16} }
};
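/* Editorial note, not part of this patch: the rss_mem_ind change only folds
 * the constants that were previously spelled out (16384 == 128 * 128 and
 * 26624 == 128 * 208), so the per-chip (BB, K2) entry counts are unchanged.
 */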
@@ -1466,50 +1651,71 @@ static struct big_ram_defs s_big_ram_defs[] = {
{ "BRB", MEM_GROUP_BRB_MEM, MEM_GROUP_BRB_RAM, DBG_GRC_PARAM_DUMP_BRB,
BRB_REG_BIG_RAM_ADDRESS, BRB_REG_BIG_RAM_DATA,
{4800, 5632} },
+
{ "BTB", MEM_GROUP_BTB_MEM, MEM_GROUP_BTB_RAM, DBG_GRC_PARAM_DUMP_BTB,
BTB_REG_BIG_RAM_ADDRESS, BTB_REG_BIG_RAM_DATA,
{2880, 3680} },
+
{ "BMB", MEM_GROUP_BMB_MEM, MEM_GROUP_BMB_RAM, DBG_GRC_PARAM_DUMP_BMB,
BMB_REG_BIG_RAM_ADDRESS, BMB_REG_BIG_RAM_DATA,
{1152, 1152} }
};
static struct reset_reg_defs s_reset_regs_defs[] = {
+ /* DBG_RESET_REG_MISCS_PL_UA */
{ MISCS_REG_RESET_PL_UA, 0x0,
- {true, true} }, /* DBG_RESET_REG_MISCS_PL_UA */
+ {true, true} },
+
+ /* DBG_RESET_REG_MISCS_PL_HV */
{ MISCS_REG_RESET_PL_HV, 0x0,
- {true, true} }, /* DBG_RESET_REG_MISCS_PL_HV */
- { MISCS_REG_RESET_PL_HV_2, 0x0,
- {false, true} }, /* DBG_RESET_REG_MISCS_PL_HV_2 */
+ {true, true} },
+
+ /* DBG_RESET_REG_MISCS_PL_HV_2 */
+ { MISCS_REG_RESET_PL_HV_2_K2, 0x0,
+ {false, true} },
+
+ /* DBG_RESET_REG_MISC_PL_UA */
{ MISC_REG_RESET_PL_UA, 0x0,
- {true, true} }, /* DBG_RESET_REG_MISC_PL_UA */
+ {true, true} },
+
+ /* DBG_RESET_REG_MISC_PL_HV */
{ MISC_REG_RESET_PL_HV, 0x0,
- {true, true} }, /* DBG_RESET_REG_MISC_PL_HV */
+ {true, true} },
+
+ /* DBG_RESET_REG_MISC_PL_PDA_VMAIN_1 */
{ MISC_REG_RESET_PL_PDA_VMAIN_1, 0x4404040,
- {true, true} }, /* DBG_RESET_REG_MISC_PL_PDA_VMAIN_1 */
+ {true, true} },
+
+ /* DBG_RESET_REG_MISC_PL_PDA_VMAIN_2 */
{ MISC_REG_RESET_PL_PDA_VMAIN_2, 0x7c00007,
- {true, true} }, /* DBG_RESET_REG_MISC_PL_PDA_VMAIN_2 */
+ {true, true} },
+
+ /* DBG_RESET_REG_MISC_PL_PDA_VAUX */
{ MISC_REG_RESET_PL_PDA_VAUX, 0x2,
- {true, true} }, /* DBG_RESET_REG_MISC_PL_PDA_VAUX */
+ {true, true} },
};
static struct phy_defs s_phy_defs[] = {
- {"nw_phy", NWS_REG_NWS_CMU, PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0,
- PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8,
- PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0,
- PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8},
- {"sgmii_phy", MS_REG_MS_CMU, PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132,
- PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133,
- PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130,
- PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131},
- {"pcie_phy0", PHY_PCIE_REG_PHY0, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132,
- PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133,
- PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130,
- PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131},
- {"pcie_phy1", PHY_PCIE_REG_PHY1, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132,
- PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133,
- PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130,
- PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131},
+ {"nw_phy", NWS_REG_NWS_CMU_K2,
+ PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2,
+ PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2,
+ PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2,
+ PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2},
+ {"sgmii_phy", MS_REG_MS_CMU_K2,
+ PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2,
+ PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2,
+ PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2,
+ PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2},
+ {"pcie_phy0", PHY_PCIE_REG_PHY0_K2,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2},
+ {"pcie_phy1", PHY_PCIE_REG_PHY1_K2,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2},
};
/**************************** Private Functions ******************************/
@@ -1556,7 +1762,7 @@ static enum dbg_status qed_dbg_dev_init(struct qed_hwfn *p_hwfn,
dev_data->chip_id = CHIP_K2;
dev_data->mode_enable[MODE_K2] = 1;
} else if (QED_IS_BB_B0(p_hwfn->cdev)) {
- dev_data->chip_id = CHIP_BB_B0;
+ dev_data->chip_id = CHIP_BB;
dev_data->mode_enable[MODE_BB] = 1;
} else {
return DBG_STATUS_UNKNOWN_CHIP;
@@ -1569,9 +1775,20 @@ static enum dbg_status qed_dbg_dev_init(struct qed_hwfn *p_hwfn,
qed_dbg_grc_init_params(p_hwfn);
dev_data->initialized = true;
+
return DBG_STATUS_OK;
}
+static struct dbg_bus_block *get_dbg_bus_block_desc(struct qed_hwfn *p_hwfn,
+ enum block_id block_id)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+
+ return (struct dbg_bus_block *)&dbg_bus_blocks[block_id *
+ MAX_CHIP_IDS +
+ dev_data->chip_id];
+}
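/* Editorial sketch, not part of this patch: dbg_bus_blocks behaves as a
 * flattened [MAX_BLOCK_ID][MAX_CHIP_IDS] table, so the lookup above is
 * equivalent to the two-dimensional view below:
 */
static const struct dbg_bus_block *
dbg_bus_block_2d_sketch(const struct dbg_bus_block (*blocks)[MAX_CHIP_IDS],
			enum block_id block_id, u8 chip_id)
{
	/* blocks[block_id][chip_id] == base[block_id * MAX_CHIP_IDS + chip_id] */
	return &blocks[block_id][chip_id];
}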
+
/* Reads the FW info structure for the specified Storm from the chip,
* and writes it to the specified fw_info pointer.
*/
@@ -1579,25 +1796,28 @@ static void qed_read_fw_info(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u8 storm_id, struct fw_info *fw_info)
{
- /* Read first the address that points to fw_info location.
- * The address is located in the last line of the Storm RAM.
- */
- u32 addr = s_storm_defs[storm_id].sem_fast_mem_addr +
- SEM_FAST_REG_INT_RAM +
- DWORDS_TO_BYTES(SEM_FAST_REG_INT_RAM_SIZE) -
- sizeof(struct fw_info_location);
+ struct storm_defs *storm = &s_storm_defs[storm_id];
struct fw_info_location fw_info_location;
- u32 *dest = (u32 *)&fw_info_location;
- u32 i;
+ u32 addr, i, *dest;
memset(&fw_info_location, 0, sizeof(fw_info_location));
memset(fw_info, 0, sizeof(*fw_info));
+
+ /* First, read the address that points to the fw_info location.
+ * The address is located in the last line of the Storm RAM.
+ */
+ addr = storm->sem_fast_mem_addr + SEM_FAST_REG_INT_RAM +
+ DWORDS_TO_BYTES(SEM_FAST_REG_INT_RAM_SIZE) -
+ sizeof(fw_info_location);
+ dest = (u32 *)&fw_info_location;
+
for (i = 0; i < BYTES_TO_DWORDS(sizeof(fw_info_location));
i++, addr += BYTES_IN_DWORD)
dest[i] = qed_rd(p_hwfn, p_ptt, addr);
+
+ /* Read FW version info from Storm RAM */
if (fw_info_location.size > 0 && fw_info_location.size <=
sizeof(*fw_info)) {
- /* Read FW version info from Storm RAM */
addr = fw_info_location.grc_addr;
dest = (u32 *)fw_info;
for (i = 0; i < BYTES_TO_DWORDS(fw_info_location.size);
@@ -1606,27 +1826,30 @@ static void qed_read_fw_info(struct qed_hwfn *p_hwfn,
}
}
-/* Dumps the specified string to the specified buffer. Returns the dumped size
- * in bytes (actual length + 1 for the null character termination).
+/* Dumps the specified string to the specified buffer.
+ * Returns the dumped size in bytes (string length + 1 for the NUL
+ * terminator).
*/
static u32 qed_dump_str(char *dump_buf, bool dump, const char *str)
{
if (dump)
strcpy(dump_buf, str);
+
return (u32)strlen(str) + 1;
}
-/* Dumps zeros to align the specified buffer to dwords. Returns the dumped size
- * in bytes.
+/* Dumps zeros to align the specified buffer to dwords.
+ * Returns the dumped size in bytes.
*/
static u32 qed_dump_align(char *dump_buf, bool dump, u32 byte_offset)
{
- u8 offset_in_dword = (u8)(byte_offset & 0x3), align_size;
+ u8 offset_in_dword, align_size;
+ offset_in_dword = (u8)(byte_offset & 0x3);
align_size = offset_in_dword ? BYTES_IN_DWORD - offset_in_dword : 0;
if (dump && align_size)
memset(dump_buf, 0, align_size);
+
return align_size;
}
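/* Editorial worked example, not part of this patch: for byte_offset == 5,
 * offset_in_dword == 1, so qed_dump_align() zero-fills 3 bytes and the next
 * item starts dword-aligned at byte 8; for an already-aligned offset it
 * writes nothing and returns 0.
 */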
@@ -1653,6 +1876,7 @@ static u32 qed_dump_str_param(u32 *dump_buf,
/* Align buffer to next dword */
offset += qed_dump_align(char_buf + offset, dump, offset);
+
return BYTES_TO_DWORDS(offset);
}
@@ -1681,6 +1905,7 @@ static u32 qed_dump_num_param(u32 *dump_buf,
if (dump)
*(dump_buf + offset) = param_val;
offset++;
+
return offset;
}
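/* Editorial sketch, not part of this patch: every qed_dump_* helper returns
 * the size it would emit even when dump == false, so callers run the same
 * code twice, once to size the buffer and once to fill it:
 */
static u32 dump_two_pass_sketch(u32 *buf, bool dump)
{
	u32 offset = 0;

	offset += qed_dump_str_param(buf + offset, dump, "name", "value");
	offset += qed_dump_num_param(buf + offset, dump, "count", 3);

	return offset; /* identical for the sizing and filling passes */
}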
@@ -1695,7 +1920,6 @@ static u32 qed_dump_fw_ver_param(struct qed_hwfn *p_hwfn,
char fw_ver_str[16] = EMPTY_FW_VERSION_STR;
char fw_img_str[16] = EMPTY_FW_IMAGE_STR;
struct fw_info fw_info = { {0}, {0} };
- int printed_chars;
u32 offset = 0;
if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) {
@@ -1705,37 +1929,32 @@ static u32 qed_dump_fw_ver_param(struct qed_hwfn *p_hwfn,
for (storm_id = 0; storm_id < MAX_DBG_STORMS && !found;
storm_id++) {
- /* Read FW version/image */
- if (!dev_data->block_in_reset
- [s_storm_defs[storm_id].block_id]) {
- /* read FW info for the current Storm */
- qed_read_fw_info(p_hwfn,
- p_ptt, storm_id, &fw_info);
-
- /* Create FW version/image strings */
- printed_chars =
- snprintf(fw_ver_str,
- sizeof(fw_ver_str),
- "%d_%d_%d_%d",
- fw_info.ver.num.major,
- fw_info.ver.num.minor,
- fw_info.ver.num.rev,
- fw_info.ver.num.eng);
- if (printed_chars < 0 || printed_chars >=
- sizeof(fw_ver_str))
- DP_NOTICE(p_hwfn,
- "Unexpected debug error: invalid FW version string\n");
- switch (fw_info.ver.image_id) {
- case FW_IMG_MAIN:
- strcpy(fw_img_str, "main");
- break;
- default:
- strcpy(fw_img_str, "unknown");
- break;
- }
+ struct storm_defs *storm = &s_storm_defs[storm_id];
- found = true;
+ /* Read FW version/image */
+ if (dev_data->block_in_reset[storm->block_id])
+ continue;
+
+ /* Read FW info for the current Storm */
+ qed_read_fw_info(p_hwfn, p_ptt, storm_id, &fw_info);
+
+ /* Create FW version/image strings */
+ if (snprintf(fw_ver_str, sizeof(fw_ver_str),
+ "%d_%d_%d_%d", fw_info.ver.num.major,
+ fw_info.ver.num.minor, fw_info.ver.num.rev,
+ fw_info.ver.num.eng) < 0)
+ DP_NOTICE(p_hwfn,
+ "Unexpected debug error: invalid FW version string\n");
+ switch (fw_info.ver.image_id) {
+ case FW_IMG_MAIN:
+ strcpy(fw_img_str, "main");
+ break;
+ default:
+ strcpy(fw_img_str, "unknown");
+ break;
}
+
+ found = true;
}
}
@@ -1747,6 +1966,7 @@ static u32 qed_dump_fw_ver_param(struct qed_hwfn *p_hwfn,
offset += qed_dump_num_param(dump_buf + offset,
dump,
"fw-timestamp", fw_info.ver.timestamp);
+
return offset;
}
@@ -1759,17 +1979,18 @@ static u32 qed_dump_mfw_ver_param(struct qed_hwfn *p_hwfn,
{
char mfw_ver_str[16] = EMPTY_FW_VERSION_STR;
- if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) {
+ if (dump &&
+ !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) {
u32 global_section_offsize, global_section_addr, mfw_ver;
u32 public_data_addr, global_section_offsize_addr;
- int printed_chars;
- /* Find MCP public data GRC address.
- * Needs to be ORed with MCP_REG_SCRATCH due to a HW bug.
+ /* Find MCP public data GRC address. Needs to be ORed with
+ * MCP_REG_SCRATCH due to a HW bug.
*/
- public_data_addr = qed_rd(p_hwfn, p_ptt,
+ public_data_addr = qed_rd(p_hwfn,
+ p_ptt,
MISC_REG_SHARED_MEM_ADDR) |
- MCP_REG_SCRATCH;
+ MCP_REG_SCRATCH;
/* Find MCP public global section offset */
global_section_offsize_addr = public_data_addr +
@@ -1778,9 +1999,9 @@ static u32 qed_dump_mfw_ver_param(struct qed_hwfn *p_hwfn,
sizeof(offsize_t) * PUBLIC_GLOBAL;
global_section_offsize = qed_rd(p_hwfn, p_ptt,
global_section_offsize_addr);
- global_section_addr = MCP_REG_SCRATCH +
- (global_section_offsize &
- OFFSIZE_OFFSET_MASK) * 4;
+ global_section_addr =
+ MCP_REG_SCRATCH +
+ (global_section_offsize & OFFSIZE_OFFSET_MASK) * 4;
/* Read MFW version from MCP public global section */
mfw_ver = qed_rd(p_hwfn, p_ptt,
@@ -1788,13 +2009,9 @@ static u32 qed_dump_mfw_ver_param(struct qed_hwfn *p_hwfn,
offsetof(struct public_global, mfw_ver));
/* Dump MFW version param */
- printed_chars = snprintf(mfw_ver_str, sizeof(mfw_ver_str),
- "%d_%d_%d_%d",
- (u8) (mfw_ver >> 24),
- (u8) (mfw_ver >> 16),
- (u8) (mfw_ver >> 8),
- (u8) mfw_ver);
- if (printed_chars < 0 || printed_chars >= sizeof(mfw_ver_str))
+ if (snprintf(mfw_ver_str, sizeof(mfw_ver_str), "%d_%d_%d_%d",
+ (u8)(mfw_ver >> 24), (u8)(mfw_ver >> 16),
+ (u8)(mfw_ver >> 8), (u8)mfw_ver) < 0)
DP_NOTICE(p_hwfn,
"Unexpected debug error: invalid MFW version string\n");
}
@@ -1820,11 +2037,12 @@ static u32 qed_dump_common_global_params(struct qed_hwfn *p_hwfn,
bool dump,
u8 num_specific_global_params)
{
- u8 num_params = NUM_COMMON_GLOBAL_PARAMS + num_specific_global_params;
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
u32 offset = 0;
+ u8 num_params;
- /* Find platform string and dump global params section header */
+ /* Dump global params section header */
+ num_params = NUM_COMMON_GLOBAL_PARAMS + num_specific_global_params;
offset += qed_dump_section_hdr(dump_buf + offset,
dump, "global_params", num_params);
@@ -1846,25 +2064,29 @@ static u32 qed_dump_common_global_params(struct qed_hwfn *p_hwfn,
offset +=
qed_dump_num_param(dump_buf + offset, dump, "pci-func",
p_hwfn->abs_pf_id);
+
return offset;
}
-/* Writes the last section to the specified buffer at the given offset.
- * Returns the dumped size in dwords.
+/* Writes the "last" section (including CRC) to the specified buffer at the
+ * given offset. Returns the dumped size in dwords.
*/
-static u32 qed_dump_last_section(u32 *dump_buf, u32 offset, bool dump)
+static u32 qed_dump_last_section(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf, u32 offset, bool dump)
{
- u32 start_offset = offset, crc = ~0;
+ u32 start_offset = offset;
/* Dump CRC section header */
offset += qed_dump_section_hdr(dump_buf + offset, dump, "last", 0);
- /* Calculate CRC32 and add it to the dword following the "last" section.
- */
+ /* Calculate CRC32 and add it to the dword after the "last" section */
if (dump)
- *(dump_buf + offset) = ~crc32(crc, (u8 *)dump_buf,
+ *(dump_buf + offset) = ~crc32(0xffffffff,
+ (u8 *)dump_buf,
DWORDS_TO_BYTES(offset));
+
offset++;
+
return offset - start_offset;
}
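/* Editorial sketch, not part of this patch: a dump consumer can validate the
 * trailing CRC by recomputing the same seeded CRC32 over everything before
 * the final dword and comparing (size_dwords includes the CRC dword):
 */
static bool dump_crc_ok_sketch(u32 *dump_buf, u32 size_dwords)
{
	return dump_buf[size_dwords - 1] ==
	       ~crc32(0xffffffff, (u8 *)dump_buf,
		      DWORDS_TO_BYTES(size_dwords - 1));
}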
@@ -1883,11 +2105,12 @@ static void qed_update_blocks_reset_state(struct qed_hwfn *p_hwfn,
p_ptt, s_reset_regs_defs[i].addr);
/* Check if blocks are in reset */
- for (i = 0; i < MAX_BLOCK_ID; i++)
- dev_data->block_in_reset[i] =
- s_block_defs[i]->has_reset_bit &&
- !(reg_val[s_block_defs[i]->reset_reg] &
- BIT(s_block_defs[i]->reset_bit_offset));
+ for (i = 0; i < MAX_BLOCK_ID; i++) {
+ struct block_defs *block = s_block_defs[i];
+
+ dev_data->block_in_reset[i] = block->has_reset_bit &&
+ !(reg_val[block->reset_reg] & BIT(block->reset_bit_offset));
+ }
}
/* Enable / disable the Debug block */
@@ -1902,12 +2125,12 @@ static void qed_bus_reset_dbg_block(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt)
{
u32 dbg_reset_reg_addr, old_reset_reg_val, new_reset_reg_val;
+ struct block_defs *dbg_block = s_block_defs[BLOCK_DBG];
- dbg_reset_reg_addr =
- s_reset_regs_defs[s_block_defs[BLOCK_DBG]->reset_reg].addr;
+ dbg_reset_reg_addr = s_reset_regs_defs[dbg_block->reset_reg].addr;
old_reset_reg_val = qed_rd(p_hwfn, p_ptt, dbg_reset_reg_addr);
- new_reset_reg_val = old_reset_reg_val &
- ~BIT(s_block_defs[BLOCK_DBG]->reset_bit_offset);
+ new_reset_reg_val =
+ old_reset_reg_val & ~BIT(dbg_block->reset_bit_offset);
qed_wr(p_hwfn, p_ptt, dbg_reset_reg_addr, new_reset_reg_val);
qed_wr(p_hwfn, p_ptt, dbg_reset_reg_addr, old_reset_reg_val);
@@ -1920,8 +2143,8 @@ static void qed_bus_set_framing_mode(struct qed_hwfn *p_hwfn,
qed_wr(p_hwfn, p_ptt, DBG_REG_FRAMING_MODE, (u8)mode);
}
-/* Enable / disable Debug Bus clients according to the specified mask.
- * (1 = enable, 0 = disable)
+/* Enable / disable Debug Bus clients according to the specified mask
+ * (1 = enable, 0 = disable).
*/
static void qed_bus_enable_clients(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u32 client_mask)
@@ -1931,10 +2154,14 @@ static void qed_bus_enable_clients(struct qed_hwfn *p_hwfn,
static bool qed_is_mode_match(struct qed_hwfn *p_hwfn, u16 *modes_buf_offset)
{
- const u32 *ptr = s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr;
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
- u8 tree_val = ((u8 *)ptr)[(*modes_buf_offset)++];
bool arg1, arg2;
+ const u32 *ptr;
+ u8 tree_val;
+
+ /* Get next element from modes tree buffer */
+ ptr = s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr;
+ tree_val = ((u8 *)ptr)[(*modes_buf_offset)++];
switch (tree_val) {
case INIT_MODE_OP_NOT:
@@ -1974,75 +2201,81 @@ static bool qed_grc_is_storm_included(struct qed_hwfn *p_hwfn,
static bool qed_grc_is_mem_included(struct qed_hwfn *p_hwfn,
enum block_id block_id, u8 mem_group_id)
{
+ struct block_defs *block = s_block_defs[block_id];
u8 i;
/* Check Storm match */
- if (s_block_defs[block_id]->associated_to_storm &&
+ if (block->associated_to_storm &&
!qed_grc_is_storm_included(p_hwfn,
- (enum dbg_storms)s_block_defs[block_id]->storm_id))
+ (enum dbg_storms)block->storm_id))
return false;
- for (i = 0; i < NUM_BIG_RAM_TYPES; i++)
- if (mem_group_id == s_big_ram_defs[i].mem_group_id ||
- mem_group_id == s_big_ram_defs[i].ram_mem_group_id)
- return qed_grc_is_included(p_hwfn,
- s_big_ram_defs[i].grc_param);
- if (mem_group_id == MEM_GROUP_PXP_ILT || mem_group_id ==
- MEM_GROUP_PXP_MEM)
+ for (i = 0; i < NUM_BIG_RAM_TYPES; i++) {
+ struct big_ram_defs *big_ram = &s_big_ram_defs[i];
+
+ if (mem_group_id == big_ram->mem_group_id ||
+ mem_group_id == big_ram->ram_mem_group_id)
+ return qed_grc_is_included(p_hwfn, big_ram->grc_param);
+ }
+
+ switch (mem_group_id) {
+ case MEM_GROUP_PXP_ILT:
+ case MEM_GROUP_PXP_MEM:
return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PXP);
- if (mem_group_id == MEM_GROUP_RAM)
+ case MEM_GROUP_RAM:
return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_RAM);
- if (mem_group_id == MEM_GROUP_PBUF)
+ case MEM_GROUP_PBUF:
return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PBUF);
- if (mem_group_id == MEM_GROUP_CAU_MEM ||
- mem_group_id == MEM_GROUP_CAU_SB ||
- mem_group_id == MEM_GROUP_CAU_PI)
+ case MEM_GROUP_CAU_MEM:
+ case MEM_GROUP_CAU_SB:
+ case MEM_GROUP_CAU_PI:
return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CAU);
- if (mem_group_id == MEM_GROUP_QM_MEM)
+ case MEM_GROUP_QM_MEM:
return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_QM);
- if (mem_group_id == MEM_GROUP_CONN_CFC_MEM ||
- mem_group_id == MEM_GROUP_TASK_CFC_MEM)
+ case MEM_GROUP_CFC_MEM:
+ case MEM_GROUP_CONN_CFC_MEM:
+ case MEM_GROUP_TASK_CFC_MEM:
return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CFC);
- if (mem_group_id == MEM_GROUP_IGU_MEM || mem_group_id ==
- MEM_GROUP_IGU_MSIX)
+ case MEM_GROUP_IGU_MEM:
+ case MEM_GROUP_IGU_MSIX:
return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IGU);
- if (mem_group_id == MEM_GROUP_MULD_MEM)
+ case MEM_GROUP_MULD_MEM:
return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MULD);
- if (mem_group_id == MEM_GROUP_PRS_MEM)
+ case MEM_GROUP_PRS_MEM:
return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PRS);
- if (mem_group_id == MEM_GROUP_DMAE_MEM)
+ case MEM_GROUP_DMAE_MEM:
return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DMAE);
- if (mem_group_id == MEM_GROUP_TM_MEM)
+ case MEM_GROUP_TM_MEM:
return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_TM);
- if (mem_group_id == MEM_GROUP_SDM_MEM)
+ case MEM_GROUP_SDM_MEM:
return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_SDM);
- if (mem_group_id == MEM_GROUP_TDIF_CTX || mem_group_id ==
- MEM_GROUP_RDIF_CTX)
+ case MEM_GROUP_TDIF_CTX:
+ case MEM_GROUP_RDIF_CTX:
return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DIF);
- if (mem_group_id == MEM_GROUP_CM_MEM)
+ case MEM_GROUP_CM_MEM:
return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM);
- if (mem_group_id == MEM_GROUP_IOR)
+ case MEM_GROUP_IOR:
return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IOR);
-
- return true;
+ default:
+ return true;
+ }
}
/* Stalls all Storms */
static void qed_grc_stall_storms(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, bool stall)
{
- u8 reg_val = stall ? 1 : 0;
+ u32 reg_addr;
u8 storm_id;
for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
- if (qed_grc_is_storm_included(p_hwfn,
- (enum dbg_storms)storm_id)) {
- u32 reg_addr =
- s_storm_defs[storm_id].sem_fast_mem_addr +
- SEM_FAST_REG_STALL_0;
+ if (!qed_grc_is_storm_included(p_hwfn,
+ (enum dbg_storms)storm_id))
+ continue;
- qed_wr(p_hwfn, p_ptt, reg_addr, reg_val);
- }
+ reg_addr = s_storm_defs[storm_id].sem_fast_mem_addr +
+ SEM_FAST_REG_STALL_0_BB_K2;
+ qed_wr(p_hwfn, p_ptt, reg_addr, stall ? 1 : 0);
}
msleep(STALL_DELAY_MS);
@@ -2054,24 +2287,29 @@ static void qed_grc_unreset_blocks(struct qed_hwfn *p_hwfn,
{
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
u32 reg_val[MAX_DBG_RESET_REGS] = { 0 };
- u32 i;
+ u32 block_id, i;
/* Fill reset regs values */
- for (i = 0; i < MAX_BLOCK_ID; i++)
- if (s_block_defs[i]->has_reset_bit && s_block_defs[i]->unreset)
- reg_val[s_block_defs[i]->reset_reg] |=
- BIT(s_block_defs[i]->reset_bit_offset);
+ for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
+ struct block_defs *block = s_block_defs[block_id];
+
+ if (block->has_reset_bit && block->unreset)
+ reg_val[block->reset_reg] |=
+ BIT(block->reset_bit_offset);
+ }
/* Write reset registers */
for (i = 0; i < MAX_DBG_RESET_REGS; i++) {
- if (s_reset_regs_defs[i].exists[dev_data->chip_id]) {
- reg_val[i] |= s_reset_regs_defs[i].unreset_val;
- if (reg_val[i])
- qed_wr(p_hwfn,
- p_ptt,
- s_reset_regs_defs[i].addr +
- RESET_REG_UNRESET_OFFSET, reg_val[i]);
- }
+ if (!s_reset_regs_defs[i].exists[dev_data->chip_id])
+ continue;
+
+ reg_val[i] |= s_reset_regs_defs[i].unreset_val;
+
+ if (reg_val[i])
+ qed_wr(p_hwfn,
+ p_ptt,
+ s_reset_regs_defs[i].addr +
+ RESET_REG_UNRESET_OFFSET, reg_val[i]);
}
}
@@ -2095,6 +2333,7 @@ qed_get_block_attn_regs(enum block_id block_id, enum dbg_attn_type attn_type,
qed_get_block_attn_data(block_id, attn_type);
*num_attn_regs = block_type_data->num_regs;
+
return &((const struct dbg_attn_reg *)
s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)[block_type_data->
regs_offset];
@@ -2105,34 +2344,34 @@ static void qed_grc_clear_all_prty(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt)
{
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ const struct dbg_attn_reg *attn_reg_arr;
u8 reg_idx, num_attn_regs;
u32 block_id;
for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
- const struct dbg_attn_reg *attn_reg_arr;
-
if (dev_data->block_in_reset[block_id])
continue;
attn_reg_arr = qed_get_block_attn_regs((enum block_id)block_id,
ATTN_TYPE_PARITY,
&num_attn_regs);
+
for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
const struct dbg_attn_reg *reg_data =
&attn_reg_arr[reg_idx];
+ u16 modes_buf_offset;
+ bool eval_mode;
/* Check mode */
- bool eval_mode = GET_FIELD(reg_data->mode.data,
- DBG_MODE_HDR_EVAL_MODE) > 0;
- u16 modes_buf_offset =
+ eval_mode = GET_FIELD(reg_data->mode.data,
+ DBG_MODE_HDR_EVAL_MODE) > 0;
+ modes_buf_offset =
GET_FIELD(reg_data->mode.data,
DBG_MODE_HDR_MODES_BUF_OFFSET);
+ /* If the mode matches, read the parity status read-clear register */
if (!eval_mode ||
qed_is_mode_match(p_hwfn, &modes_buf_offset))
- /* Mode match - read parity status read-clear
- * register.
- */
qed_rd(p_hwfn, p_ptt,
DWORDS_TO_BYTES(reg_data->
sts_clr_address));
@@ -2142,11 +2381,11 @@ static void qed_grc_clear_all_prty(struct qed_hwfn *p_hwfn,
/* Dumps GRC registers section header. Returns the dumped size in dwords.
* The following parameters are dumped:
- * - 'count' = num_dumped_entries
- * - 'split' = split_type
- * - 'id' = split_id (dumped only if split_id >= 0)
- * - 'param_name' = param_val (user param, dumped only if param_name != NULL and
- * param_val != NULL)
+ * - count: no. of dumped entries
+ * - split: split type
+ * - id: split ID (dumped only if split_id >= 0)
+ * - param_name = param_val: user parameter name/value pair (dumped only
+ * if param_name != NULL and param_val != NULL).
*/
static u32 qed_grc_dump_regs_hdr(u32 *dump_buf,
bool dump,
@@ -2170,84 +2409,100 @@ static u32 qed_grc_dump_regs_hdr(u32 *dump_buf,
if (param_name && param_val)
offset += qed_dump_str_param(dump_buf + offset,
dump, param_name, param_val);
+
return offset;
}
/* Dumps the GRC registers in the specified address range.
* Returns the dumped size in dwords.
+ * The addr and len arguments are specified in dwords.
*/
static u32 qed_grc_dump_addr_range(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt, u32 *dump_buf,
- bool dump, u32 addr, u32 len)
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ bool dump, u32 addr, u32 len, bool wide_bus)
{
u32 byte_addr = DWORDS_TO_BYTES(addr), offset = 0, i;
- if (dump)
- for (i = 0; i < len; i++, byte_addr += BYTES_IN_DWORD, offset++)
- *(dump_buf + offset) = qed_rd(p_hwfn, p_ptt, byte_addr);
- else
- offset += len;
+ if (!dump)
+ return len;
+
+ for (i = 0; i < len; i++, byte_addr += BYTES_IN_DWORD, offset++)
+ *(dump_buf + offset) = qed_rd(p_hwfn, p_ptt, byte_addr);
+
return offset;
}
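/* Editorial note, not part of this patch: the new wide_bus argument is only
 * plumbed through at this point; the read loop above still fetches one dword
 * at a time via qed_rd() whether or not the flag is set.
 */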
-/* Dumps GRC registers sequence header. Returns the dumped size in dwords. */
-static u32 qed_grc_dump_reg_entry_hdr(u32 *dump_buf, bool dump, u32 addr,
- u32 len)
+/* Dumps GRC registers sequence header. Returns the dumped size in dwords.
+ * The addr and len arguments are specified in dwords.
+ */
+static u32 qed_grc_dump_reg_entry_hdr(u32 *dump_buf,
+ bool dump, u32 addr, u32 len)
{
if (dump)
*dump_buf = addr | (len << REG_DUMP_LEN_SHIFT);
+
return 1;
}
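/* Editorial sketch, not part of this patch: assuming the dword address fits
 * below REG_DUMP_LEN_SHIFT, a dump parser undoes the packing above with:
 */
static void parse_reg_entry_hdr_sketch(u32 hdr, u32 *addr, u32 *len)
{
	*len = hdr >> REG_DUMP_LEN_SHIFT;
	*addr = hdr & (BIT(REG_DUMP_LEN_SHIFT) - 1);
}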
-/* Dumps GRC registers sequence. Returns the dumped size in dwords. */
+/* Dumps GRC registers sequence. Returns the dumped size in dwords.
+ * The addr and len arguments are specified in dwords.
+ */
static u32 qed_grc_dump_reg_entry(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt, u32 *dump_buf,
- bool dump, u32 addr, u32 len)
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ bool dump, u32 addr, u32 len, bool wide_bus)
{
u32 offset = 0;
offset += qed_grc_dump_reg_entry_hdr(dump_buf, dump, addr, len);
offset += qed_grc_dump_addr_range(p_hwfn,
p_ptt,
- dump_buf + offset, dump, addr, len);
+ dump_buf + offset,
+ dump, addr, len, wide_bus);
+
return offset;
}
/* Dumps GRC registers sequence with skip cycle.
* Returns the dumped size in dwords.
+ * - addr: start GRC address in dwords
+ * - total_len: total no. of dwords to dump
+ * - read_len: no. consecutive dwords to read
+ * - skip_len: no. of dwords to skip (and fill with zeros)
*/
static u32 qed_grc_dump_reg_entry_skip(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt, u32 *dump_buf,
- bool dump, u32 addr, u32 total_len,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ bool dump,
+ u32 addr,
+ u32 total_len,
u32 read_len, u32 skip_len)
{
u32 offset = 0, reg_offset = 0;
offset += qed_grc_dump_reg_entry_hdr(dump_buf, dump, addr, total_len);
- if (dump) {
- while (reg_offset < total_len) {
- u32 curr_len = min_t(u32,
- read_len,
- total_len - reg_offset);
- offset += qed_grc_dump_addr_range(p_hwfn,
- p_ptt,
- dump_buf + offset,
- dump, addr, curr_len);
+
+ if (!dump)
+ return offset + total_len;
+
+ while (reg_offset < total_len) {
+ u32 curr_len = min_t(u32, read_len, total_len - reg_offset);
+
+ offset += qed_grc_dump_addr_range(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump, addr, curr_len, false);
+ reg_offset += curr_len;
+ addr += curr_len;
+
+ if (reg_offset < total_len) {
+ curr_len = min_t(u32, skip_len, total_len - skip_len);
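			/* Editorial note, not part of this patch: this clamp
			 * keeps the pre-refactor expression total_len -
			 * skip_len; a reader might expect total_len -
			 * reg_offset here, matching the read clamp above.
			 */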
+ memset(dump_buf + offset, 0, DWORDS_TO_BYTES(curr_len));
+ offset += curr_len;
reg_offset += curr_len;
addr += curr_len;
- if (reg_offset < total_len) {
- curr_len = min_t(u32,
- skip_len,
- total_len - skip_len);
- memset(dump_buf + offset, 0,
- DWORDS_TO_BYTES(curr_len));
- offset += curr_len;
- reg_offset += curr_len;
- addr += curr_len;
- }
}
- } else {
- offset += total_len;
}
return offset;
@@ -2266,43 +2521,48 @@ static u32 qed_grc_dump_regs_entries(struct qed_hwfn *p_hwfn,
bool mode_match = true;
*num_dumped_reg_entries = 0;
+
while (input_offset < input_regs_arr.size_in_dwords) {
const struct dbg_dump_cond_hdr *cond_hdr =
(const struct dbg_dump_cond_hdr *)
&input_regs_arr.ptr[input_offset++];
- bool eval_mode = GET_FIELD(cond_hdr->mode.data,
- DBG_MODE_HDR_EVAL_MODE) > 0;
+ u16 modes_buf_offset;
+ bool eval_mode;
/* Check mode/block */
+ eval_mode = GET_FIELD(cond_hdr->mode.data,
+ DBG_MODE_HDR_EVAL_MODE) > 0;
if (eval_mode) {
- u16 modes_buf_offset =
+ modes_buf_offset =
GET_FIELD(cond_hdr->mode.data,
DBG_MODE_HDR_MODES_BUF_OFFSET);
mode_match = qed_is_mode_match(p_hwfn,
&modes_buf_offset);
}
- if (mode_match && block_enable[cond_hdr->block_id]) {
- for (i = 0; i < cond_hdr->data_size;
- i++, input_offset++) {
- const struct dbg_dump_reg *reg =
- (const struct dbg_dump_reg *)
- &input_regs_arr.ptr[input_offset];
- u32 addr, len;
-
- addr = GET_FIELD(reg->data,
- DBG_DUMP_REG_ADDRESS);
- len = GET_FIELD(reg->data, DBG_DUMP_REG_LENGTH);
- offset +=
- qed_grc_dump_reg_entry(p_hwfn, p_ptt,
- dump_buf + offset,
- dump,
- addr,
- len);
- (*num_dumped_reg_entries)++;
- }
- } else {
+ if (!mode_match || !block_enable[cond_hdr->block_id]) {
input_offset += cond_hdr->data_size;
+ continue;
+ }
+
+ for (i = 0; i < cond_hdr->data_size; i++, input_offset++) {
+ const struct dbg_dump_reg *reg =
+ (const struct dbg_dump_reg *)
+ &input_regs_arr.ptr[input_offset];
+ u32 addr, len;
+ bool wide_bus;
+
+ addr = GET_FIELD(reg->data, DBG_DUMP_REG_ADDRESS);
+ len = GET_FIELD(reg->data, DBG_DUMP_REG_LENGTH);
+ wide_bus = GET_FIELD(reg->data, DBG_DUMP_REG_WIDE_BUS);
+ offset += qed_grc_dump_reg_entry(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ addr,
+ len,
+ wide_bus);
+ (*num_dumped_reg_entries)++;
}
}
@@ -2350,8 +2610,8 @@ static u32 qed_grc_dump_split_data(struct qed_hwfn *p_hwfn,
return num_dumped_reg_entries > 0 ? offset : 0;
}
-/* Dumps registers according to the input registers array.
- * Returns the dumped size in dwords.
+/* Dumps registers according to the input registers array. Returns the dumped
+ * size in dwords.
*/
static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
@@ -2361,29 +2621,37 @@ static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
const char *param_name, const char *param_val)
{
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
- struct chip_platform_defs *p_platform_defs;
+ struct chip_platform_defs *chip_platform;
u32 offset = 0, input_offset = 0;
- struct chip_defs *p_chip_defs;
+ struct chip_defs *chip;
u8 port_id, pf_id, vf_id;
u16 fid;
- p_chip_defs = &s_chip_defs[dev_data->chip_id];
- p_platform_defs = &p_chip_defs->per_platform[dev_data->platform_id];
+ chip = &s_chip_defs[dev_data->chip_id];
+ chip_platform = &chip->per_platform[dev_data->platform_id];
if (dump)
DP_VERBOSE(p_hwfn, QED_MSG_DEBUG, "Dumping registers...\n");
+
while (input_offset <
s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].size_in_dwords) {
- const struct dbg_dump_split_hdr *split_hdr =
+ const struct dbg_dump_split_hdr *split_hdr;
+ struct dbg_array curr_input_regs_arr;
+ u32 split_data_size;
+ u8 split_type_id;
+
+ split_hdr =
(const struct dbg_dump_split_hdr *)
&s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr[input_offset++];
- u8 split_type_id = GET_FIELD(split_hdr->hdr,
- DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
- u32 split_data_size = GET_FIELD(split_hdr->hdr,
- DBG_DUMP_SPLIT_HDR_DATA_SIZE);
- struct dbg_array curr_input_regs_arr = {
- &s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr[input_offset],
- split_data_size};
+ split_type_id =
+ GET_FIELD(split_hdr->hdr,
+ DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
+ split_data_size =
+ GET_FIELD(split_hdr->hdr,
+ DBG_DUMP_SPLIT_HDR_DATA_SIZE);
+ curr_input_regs_arr.ptr =
+ &s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr[input_offset];
+ curr_input_regs_arr.size_in_dwords = split_data_size;
switch (split_type_id) {
case SPLIT_TYPE_NONE:
@@ -2398,8 +2666,9 @@ static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
param_name,
param_val);
break;
+
case SPLIT_TYPE_PORT:
- for (port_id = 0; port_id < p_platform_defs->num_ports;
+ for (port_id = 0; port_id < chip_platform->num_ports;
port_id++) {
if (dump)
qed_port_pretend(p_hwfn, p_ptt,
@@ -2414,9 +2683,10 @@ static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
param_val);
}
break;
+
case SPLIT_TYPE_PF:
case SPLIT_TYPE_PORT_PF:
- for (pf_id = 0; pf_id < p_platform_defs->num_pfs;
+ for (pf_id = 0; pf_id < chip_platform->num_pfs;
pf_id++) {
u8 pfid_shift =
PXP_PRETEND_CONCRETE_FID_PFID_SHIFT;
@@ -2427,17 +2697,21 @@ static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
}
offset +=
- qed_grc_dump_split_data(p_hwfn, p_ptt,
+ qed_grc_dump_split_data(p_hwfn,
+ p_ptt,
curr_input_regs_arr,
dump_buf + offset,
- dump, block_enable,
- "pf", pf_id,
+ dump,
+ block_enable,
+ "pf",
+ pf_id,
param_name,
param_val);
}
break;
+
case SPLIT_TYPE_VF:
- for (vf_id = 0; vf_id < p_platform_defs->num_vfs;
+ for (vf_id = 0; vf_id < chip_platform->num_vfs;
vf_id++) {
u8 vfvalid_shift =
PXP_PRETEND_CONCRETE_FID_VFVALID_SHIFT;
@@ -2460,6 +2734,7 @@ static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
param_val);
}
break;
+
default:
break;
}
@@ -2490,35 +2765,37 @@ static u32 qed_grc_dump_reset_regs(struct qed_hwfn *p_hwfn,
/* Write reset registers */
for (i = 0; i < MAX_DBG_RESET_REGS; i++) {
- if (s_reset_regs_defs[i].exists[dev_data->chip_id]) {
- u32 addr = BYTES_TO_DWORDS(s_reset_regs_defs[i].addr);
+ if (!s_reset_regs_defs[i].exists[dev_data->chip_id])
+ continue;
- offset += qed_grc_dump_reg_entry(p_hwfn,
- p_ptt,
- dump_buf + offset,
- dump,
- addr,
- 1);
- num_regs++;
- }
+ offset += qed_grc_dump_reg_entry(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ BYTES_TO_DWORDS
+ (s_reset_regs_defs[i].addr), 1,
+ false);
+ num_regs++;
}
/* Write header */
if (dump)
qed_grc_dump_regs_hdr(dump_buf,
true, num_regs, "eng", -1, NULL, NULL);
+
return offset;
}
-/* Dump registers that are modified during GRC Dump and therefore must be dumped
- * first. Returns the dumped size in dwords.
+/* Dump registers that are modified during GRC Dump and therefore must be
+ * dumped first. Returns the dumped size in dwords.
*/
static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *dump_buf, bool dump)
{
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
- u32 offset = 0, num_reg_entries = 0, block_id;
+ u32 block_id, offset = 0, num_reg_entries = 0;
+ const struct dbg_attn_reg *attn_reg_arr;
u8 storm_id, reg_idx, num_attn_regs;
/* Calculate header size */
@@ -2527,14 +2804,13 @@ static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn,
/* Write parity registers */
for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
- const struct dbg_attn_reg *attn_reg_arr;
-
if (dev_data->block_in_reset[block_id] && dump)
continue;
attn_reg_arr = qed_get_block_attn_regs((enum block_id)block_id,
ATTN_TYPE_PARITY,
&num_attn_regs);
+
for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
const struct dbg_attn_reg *reg_data =
&attn_reg_arr[reg_idx];
@@ -2548,37 +2824,36 @@ static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn,
modes_buf_offset =
GET_FIELD(reg_data->mode.data,
DBG_MODE_HDR_MODES_BUF_OFFSET);
- if (!eval_mode ||
- qed_is_mode_match(p_hwfn, &modes_buf_offset)) {
- /* Mode match - read and dump registers */
- addr = reg_data->mask_address;
- offset +=
- qed_grc_dump_reg_entry(p_hwfn,
- p_ptt,
- dump_buf + offset,
- dump,
- addr,
- 1);
- addr = GET_FIELD(reg_data->data,
- DBG_ATTN_REG_STS_ADDRESS);
- offset +=
- qed_grc_dump_reg_entry(p_hwfn,
- p_ptt,
- dump_buf + offset,
- dump,
- addr,
- 1);
- num_reg_entries += 2;
- }
+ if (eval_mode &&
+ !qed_is_mode_match(p_hwfn, &modes_buf_offset))
+ continue;
+
+ /* Mode match: read & dump registers */
+ addr = reg_data->mask_address;
+ offset += qed_grc_dump_reg_entry(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ addr,
+ 1, false);
+ addr = GET_FIELD(reg_data->data,
+ DBG_ATTN_REG_STS_ADDRESS);
+ offset += qed_grc_dump_reg_entry(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ addr,
+ 1, false);
+ num_reg_entries += 2;
}
}
- /* Write storm stall status registers */
+ /* Write Storm stall status registers */
for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
+ struct storm_defs *storm = &s_storm_defs[storm_id];
u32 addr;
- if (dev_data->block_in_reset[s_storm_defs[storm_id].block_id] &&
- dump)
+ if (dev_data->block_in_reset[storm->block_id] && dump)
continue;
addr =
@@ -2589,7 +2864,8 @@ static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn,
dump_buf + offset,
dump,
addr,
- 1);
+ 1,
+ false);
num_reg_entries++;
}
@@ -2598,6 +2874,7 @@ static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn,
qed_grc_dump_regs_hdr(dump_buf,
true,
num_reg_entries, "eng", -1, NULL, NULL);
+
return offset;
}
@@ -2637,17 +2914,17 @@ static u32 qed_grc_dump_special_regs(struct qed_hwfn *p_hwfn,
return offset;
}
-/* Dumps a GRC memory header (section and params).
- * The following parameters are dumped:
- * name - name is dumped only if it's not NULL.
- * addr - addr is dumped only if name is NULL.
- * len - len is always dumped.
- * width - bit_width is dumped if it's not zero.
- * packed - packed=1 is dumped if it's not false.
- * mem_group - mem_group is always dumped.
- * is_storm - true only if the memory is related to a Storm.
- * storm_letter - storm letter (valid only if is_storm is true).
- * Returns the dumped size in dwords.
+/* Dumps a GRC memory header (section and params). Returns the dumped size in
+ * dwords. The following parameters are dumped:
+ * - name: dumped only if it's not NULL.
+ * - addr: in dwords, dumped only if name is NULL.
+ * - len: in dwords, always dumped.
+ * - width: dumped if it's not zero.
+ * - packed: dumped only if it's not false.
+ * - mem_group: always dumped.
+ * - is_storm: true only if the memory is related to a Storm.
+ * - storm_letter: valid only if is_storm is true.
*/
static u32 qed_grc_dump_mem_hdr(struct qed_hwfn *p_hwfn,
u32 *dump_buf,
@@ -2667,6 +2944,7 @@ static u32 qed_grc_dump_mem_hdr(struct qed_hwfn *p_hwfn,
if (!len)
DP_NOTICE(p_hwfn,
"Unexpected GRC Dump error: dumped memory size must be non-zero\n");
+
if (bit_width)
num_params++;
if (packed)
@@ -2675,6 +2953,7 @@ static u32 qed_grc_dump_mem_hdr(struct qed_hwfn *p_hwfn,
/* Dump section header */
offset += qed_dump_section_hdr(dump_buf + offset,
dump, "grc_mem", num_params);
+
if (name) {
/* Dump name */
if (is_storm) {
@@ -2694,14 +2973,15 @@ static u32 qed_grc_dump_mem_hdr(struct qed_hwfn *p_hwfn,
len, buf);
} else {
/* Dump address */
+ u32 addr_in_bytes = DWORDS_TO_BYTES(addr);
+
offset += qed_dump_num_param(dump_buf + offset,
- dump, "addr",
- DWORDS_TO_BYTES(addr));
+ dump, "addr", addr_in_bytes);
if (dump && len > 64)
DP_VERBOSE(p_hwfn,
QED_MSG_DEBUG,
"Dumping %d registers from address 0x%x...\n",
- len, (u32)DWORDS_TO_BYTES(addr));
+ len, addr_in_bytes);
}
/* Dump len */
@@ -2727,11 +3007,13 @@ static u32 qed_grc_dump_mem_hdr(struct qed_hwfn *p_hwfn,
}
offset += qed_dump_str_param(dump_buf + offset, dump, "type", buf);
+
return offset;
}
/* Dumps a single GRC memory. If name is NULL, the memory is stored by address.
* Returns the dumped size in dwords.
+ * The addr and len arguments are specified in dwords.
*/
static u32 qed_grc_dump_mem(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
@@ -2740,6 +3022,7 @@ static u32 qed_grc_dump_mem(struct qed_hwfn *p_hwfn,
const char *name,
u32 addr,
u32 len,
+ bool wide_bus,
u32 bit_width,
bool packed,
const char *mem_group,
@@ -2758,7 +3041,9 @@ static u32 qed_grc_dump_mem(struct qed_hwfn *p_hwfn,
mem_group, is_storm, storm_letter);
offset += qed_grc_dump_addr_range(p_hwfn,
p_ptt,
- dump_buf + offset, dump, addr, len);
+ dump_buf + offset,
+ dump, addr, len, wide_bus);
+
return offset;
}
@@ -2773,20 +3058,21 @@ static u32 qed_grc_dump_mem_entries(struct qed_hwfn *p_hwfn,
while (input_offset < input_mems_arr.size_in_dwords) {
const struct dbg_dump_cond_hdr *cond_hdr;
+ u16 modes_buf_offset;
u32 num_entries;
bool eval_mode;
cond_hdr = (const struct dbg_dump_cond_hdr *)
&input_mems_arr.ptr[input_offset++];
- eval_mode = GET_FIELD(cond_hdr->mode.data,
- DBG_MODE_HDR_EVAL_MODE) > 0;
+ num_entries = cond_hdr->data_size / MEM_DUMP_ENTRY_SIZE_DWORDS;
/* Check required mode */
+ eval_mode = GET_FIELD(cond_hdr->mode.data,
+ DBG_MODE_HDR_EVAL_MODE) > 0;
if (eval_mode) {
- u16 modes_buf_offset =
+ modes_buf_offset =
GET_FIELD(cond_hdr->mode.data,
DBG_MODE_HDR_MODES_BUF_OFFSET);
-
mode_match = qed_is_mode_match(p_hwfn,
&modes_buf_offset);
}
@@ -2796,81 +3082,87 @@ static u32 qed_grc_dump_mem_entries(struct qed_hwfn *p_hwfn,
continue;
}
- num_entries = cond_hdr->data_size / MEM_DUMP_ENTRY_SIZE_DWORDS;
for (i = 0; i < num_entries;
i++, input_offset += MEM_DUMP_ENTRY_SIZE_DWORDS) {
const struct dbg_dump_mem *mem =
(const struct dbg_dump_mem *)
&input_mems_arr.ptr[input_offset];
- u8 mem_group_id;
+ u8 mem_group_id = GET_FIELD(mem->dword0,
+ DBG_DUMP_MEM_MEM_GROUP_ID);
+ bool is_storm = false, mem_wide_bus;
+ enum dbg_grc_params grc_param;
+ char storm_letter = 'a';
+ enum block_id block_id;
+ u32 mem_addr, mem_len;
- mem_group_id = GET_FIELD(mem->dword0,
- DBG_DUMP_MEM_MEM_GROUP_ID);
if (mem_group_id >= MEM_GROUPS_NUM) {
DP_NOTICE(p_hwfn, "Invalid mem_group_id\n");
return 0;
}
- if (qed_grc_is_mem_included(p_hwfn,
- (enum block_id)cond_hdr->block_id,
- mem_group_id)) {
- u32 mem_addr = GET_FIELD(mem->dword0,
- DBG_DUMP_MEM_ADDRESS);
- u32 mem_len = GET_FIELD(mem->dword1,
- DBG_DUMP_MEM_LENGTH);
- enum dbg_grc_params grc_param;
- char storm_letter = 'a';
- bool is_storm = false;
-
- /* Update memory length for CCFC/TCFC memories
- * according to number of LCIDs/LTIDs.
- */
- if (mem_group_id == MEM_GROUP_CONN_CFC_MEM) {
- if (mem_len % MAX_LCIDS != 0) {
- DP_NOTICE(p_hwfn,
- "Invalid CCFC connection memory size\n");
- return 0;
- }
-
- grc_param = DBG_GRC_PARAM_NUM_LCIDS;
- mem_len = qed_grc_get_param(p_hwfn,
- grc_param) *
- (mem_len / MAX_LCIDS);
- } else if (mem_group_id ==
- MEM_GROUP_TASK_CFC_MEM) {
- if (mem_len % MAX_LTIDS != 0) {
- DP_NOTICE(p_hwfn,
- "Invalid TCFC task memory size\n");
- return 0;
- }
-
- grc_param = DBG_GRC_PARAM_NUM_LTIDS;
- mem_len = qed_grc_get_param(p_hwfn,
- grc_param) *
- (mem_len / MAX_LTIDS);
+ block_id = (enum block_id)cond_hdr->block_id;
+ if (!qed_grc_is_mem_included(p_hwfn,
+ block_id,
+ mem_group_id))
+ continue;
+
+ mem_addr = GET_FIELD(mem->dword0, DBG_DUMP_MEM_ADDRESS);
+ mem_len = GET_FIELD(mem->dword1, DBG_DUMP_MEM_LENGTH);
+ mem_wide_bus = GET_FIELD(mem->dword1,
+ DBG_DUMP_MEM_WIDE_BUS);
+
+ /* Update memory length for CCFC/TCFC memories
+ * according to number of LCIDs/LTIDs.
+ */
+ if (mem_group_id == MEM_GROUP_CONN_CFC_MEM) {
+ if (mem_len % MAX_LCIDS) {
+ DP_NOTICE(p_hwfn,
+ "Invalid CCFC connection memory size\n");
+ return 0;
}
- /* If memory is associated with Storm, update
- * Storm details.
- */
- if (s_block_defs[cond_hdr->block_id]->
- associated_to_storm) {
- is_storm = true;
- storm_letter =
- s_storm_defs[s_block_defs[
- cond_hdr->block_id]->
- storm_id].letter;
+ grc_param = DBG_GRC_PARAM_NUM_LCIDS;
+ mem_len = qed_grc_get_param(p_hwfn, grc_param) *
+ (mem_len / MAX_LCIDS);
+ } else if (mem_group_id == MEM_GROUP_TASK_CFC_MEM) {
+ if (mem_len % MAX_LTIDS) {
+ DP_NOTICE(p_hwfn,
+ "Invalid TCFC task memory size\n");
+ return 0;
}
- /* Dump memory */
- offset += qed_grc_dump_mem(p_hwfn, p_ptt,
- dump_buf + offset, dump, NULL,
- mem_addr, mem_len, 0,
+ grc_param = DBG_GRC_PARAM_NUM_LTIDS;
+ mem_len = qed_grc_get_param(p_hwfn, grc_param) *
+ (mem_len / MAX_LTIDS);
+ }
+
+ /* If memory is associated with Storm, update Storm
+ * details.
+ */
+ if (s_block_defs
+ [cond_hdr->block_id]->associated_to_storm) {
+ is_storm = true;
+ storm_letter =
+ s_storm_defs[s_block_defs
+ [cond_hdr->block_id]->
+ storm_id].letter;
+ }
+
+ /* Dump memory */
+ offset += qed_grc_dump_mem(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ NULL,
+ mem_addr,
+ mem_len,
+ mem_wide_bus,
+ 0,
false,
s_mem_group_names[mem_group_id],
- is_storm, storm_letter);
- }
- }
+ is_storm,
+ storm_letter);
+ }
}
return offset;
@@ -2887,16 +3179,22 @@ static u32 qed_grc_dump_memories(struct qed_hwfn *p_hwfn,
while (input_offset <
s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].size_in_dwords) {
- const struct dbg_dump_split_hdr *split_hdr =
- (const struct dbg_dump_split_hdr *)
+ const struct dbg_dump_split_hdr *split_hdr;
+ struct dbg_array curr_input_mems_arr;
+ u32 split_data_size;
+ u8 split_type_id;
+
+ split_hdr = (const struct dbg_dump_split_hdr *)
&s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr[input_offset++];
- u8 split_type_id = GET_FIELD(split_hdr->hdr,
- DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
- u32 split_data_size = GET_FIELD(split_hdr->hdr,
- DBG_DUMP_SPLIT_HDR_DATA_SIZE);
- struct dbg_array curr_input_mems_arr = {
- &s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr[input_offset],
- split_data_size};
+ split_type_id =
+ GET_FIELD(split_hdr->hdr,
+ DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
+ split_data_size =
+ GET_FIELD(split_hdr->hdr,
+ DBG_DUMP_SPLIT_HDR_DATA_SIZE);
+ curr_input_mems_arr.ptr =
+ &s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr[input_offset];
+ curr_input_mems_arr.size_in_dwords = split_data_size;
switch (split_type_id) {
case SPLIT_TYPE_NONE:
@@ -2906,6 +3204,7 @@ static u32 qed_grc_dump_memories(struct qed_hwfn *p_hwfn,
dump_buf + offset,
dump);
break;
+
default:
DP_NOTICE(p_hwfn,
"Dumping split memories is currently not supported\n");
@@ -2920,6 +3219,7 @@ static u32 qed_grc_dump_memories(struct qed_hwfn *p_hwfn,
/* Dumps GRC context data for the specified Storm.
* Returns the dumped size in dwords.
+ * The lid_size argument is specified in quad-regs.
*/
static u32 qed_grc_dump_ctx_data(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
@@ -2931,13 +3231,15 @@ static u32 qed_grc_dump_ctx_data(struct qed_hwfn *p_hwfn,
u32 rd_reg_addr,
u8 storm_id)
{
- u32 i, lid, total_size;
- u32 offset = 0;
+ struct storm_defs *storm = &s_storm_defs[storm_id];
+ u32 i, lid, total_size, offset = 0;
if (!lid_size)
return 0;
+
lid_size *= BYTES_IN_DWORD;
total_size = num_lids * lid_size;
+
offset += qed_grc_dump_mem_hdr(p_hwfn,
dump_buf + offset,
dump,
@@ -2945,25 +3247,19 @@ static u32 qed_grc_dump_ctx_data(struct qed_hwfn *p_hwfn,
0,
total_size,
lid_size * 32,
- false,
- name,
- true, s_storm_defs[storm_id].letter);
+ false, name, true, storm->letter);
+
+ if (!dump)
+ return offset + total_size;
/* Dump context data */
- if (dump) {
- for (lid = 0; lid < num_lids; lid++) {
- for (i = 0; i < lid_size; i++, offset++) {
- qed_wr(p_hwfn,
- p_ptt,
- s_storm_defs[storm_id].cm_ctx_wr_addr,
- (i << 9) | lid);
- *(dump_buf + offset) = qed_rd(p_hwfn,
- p_ptt,
- rd_reg_addr);
- }
+ for (lid = 0; lid < num_lids; lid++) {
+ for (i = 0; i < lid_size; i++, offset++) {
+ qed_wr(p_hwfn,
+ p_ptt, storm->cm_ctx_wr_addr, (i << 9) | lid);
+ *(dump_buf + offset) = qed_rd(p_hwfn,
+ p_ptt, rd_reg_addr);
}
- } else {
- offset += total_size;
}
return offset;
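
The context read above is indirect: the driver selects a (lid, dword) pair by writing (i << 9) | lid to the Storm's CM context write-address register, then latches one dword from the read register. A stubbed sketch of that access pattern, with reg_write/reg_read standing in for qed_wr/qed_rd:

	#include <stdint.h>

	void reg_write(uint32_t addr, uint32_t val);	/* stand-in for qed_wr */
	uint32_t reg_read(uint32_t addr);		/* stand-in for qed_rd */

	/* Indirect context dump: for each lid, select each dword of its
	 * context and read it back. Returns the dumped size in dwords.
	 */
	static uint32_t dump_ctx_data(uint32_t *buf, uint32_t wr_addr,
				      uint32_t rd_addr, uint32_t num_lids,
				      uint32_t lid_size)
	{
		uint32_t lid, i, offset = 0;

		for (lid = 0; lid < num_lids; lid++)
			for (i = 0; i < lid_size; i++, offset++) {
				reg_write(wr_addr, (i << 9) | lid);
				buf[offset] = reg_read(rd_addr);
			}

		return offset;
	}
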
@@ -2973,15 +3269,19 @@ static u32 qed_grc_dump_ctx_data(struct qed_hwfn *p_hwfn,
static u32 qed_grc_dump_ctx(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
{
+ enum dbg_grc_params grc_param;
u32 offset = 0;
u8 storm_id;
for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
+ struct storm_defs *storm = &s_storm_defs[storm_id];
+
if (!qed_grc_is_storm_included(p_hwfn,
(enum dbg_storms)storm_id))
continue;
/* Dump Conn AG context size */
+ grc_param = DBG_GRC_PARAM_NUM_LCIDS;
offset +=
qed_grc_dump_ctx_data(p_hwfn,
p_ptt,
@@ -2989,14 +3289,13 @@ static u32 qed_grc_dump_ctx(struct qed_hwfn *p_hwfn,
dump,
"CONN_AG_CTX",
qed_grc_get_param(p_hwfn,
- DBG_GRC_PARAM_NUM_LCIDS),
- s_storm_defs[storm_id].
- cm_conn_ag_ctx_lid_size,
- s_storm_defs[storm_id].
- cm_conn_ag_ctx_rd_addr,
+ grc_param),
+ storm->cm_conn_ag_ctx_lid_size,
+ storm->cm_conn_ag_ctx_rd_addr,
storm_id);
/* Dump Conn ST context size */
+ grc_param = DBG_GRC_PARAM_NUM_LCIDS;
offset +=
qed_grc_dump_ctx_data(p_hwfn,
p_ptt,
@@ -3004,14 +3303,13 @@ static u32 qed_grc_dump_ctx(struct qed_hwfn *p_hwfn,
dump,
"CONN_ST_CTX",
qed_grc_get_param(p_hwfn,
- DBG_GRC_PARAM_NUM_LCIDS),
- s_storm_defs[storm_id].
- cm_conn_st_ctx_lid_size,
- s_storm_defs[storm_id].
- cm_conn_st_ctx_rd_addr,
+ grc_param),
+ storm->cm_conn_st_ctx_lid_size,
+ storm->cm_conn_st_ctx_rd_addr,
storm_id);
/* Dump Task AG context size */
+ grc_param = DBG_GRC_PARAM_NUM_LTIDS;
offset +=
qed_grc_dump_ctx_data(p_hwfn,
p_ptt,
@@ -3019,14 +3317,13 @@ static u32 qed_grc_dump_ctx(struct qed_hwfn *p_hwfn,
dump,
"TASK_AG_CTX",
qed_grc_get_param(p_hwfn,
- DBG_GRC_PARAM_NUM_LTIDS),
- s_storm_defs[storm_id].
- cm_task_ag_ctx_lid_size,
- s_storm_defs[storm_id].
- cm_task_ag_ctx_rd_addr,
+ grc_param),
+ storm->cm_task_ag_ctx_lid_size,
+ storm->cm_task_ag_ctx_rd_addr,
storm_id);
/* Dump Task ST context size */
+ grc_param = DBG_GRC_PARAM_NUM_LTIDS;
offset +=
qed_grc_dump_ctx_data(p_hwfn,
p_ptt,
@@ -3034,11 +3331,9 @@ static u32 qed_grc_dump_ctx(struct qed_hwfn *p_hwfn,
dump,
"TASK_ST_CTX",
qed_grc_get_param(p_hwfn,
- DBG_GRC_PARAM_NUM_LTIDS),
- s_storm_defs[storm_id].
- cm_task_st_ctx_lid_size,
- s_storm_defs[storm_id].
- cm_task_st_ctx_rd_addr,
+ grc_param),
+ storm->cm_task_st_ctx_lid_size,
+ storm->cm_task_st_ctx_rd_addr,
storm_id);
}
@@ -3050,8 +3345,8 @@ static u32 qed_grc_dump_iors(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
{
char buf[10] = "IOR_SET_?";
+ u32 addr, offset = 0;
u8 storm_id, set_id;
- u32 offset = 0;
for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
struct storm_defs *storm = &s_storm_defs[storm_id];
@@ -3061,11 +3356,9 @@ static u32 qed_grc_dump_iors(struct qed_hwfn *p_hwfn,
continue;
for (set_id = 0; set_id < NUM_IOR_SETS; set_id++) {
- u32 dwords, addr;
-
- dwords = storm->sem_fast_mem_addr +
- SEM_FAST_REG_STORM_REG_FILE;
- addr = BYTES_TO_DWORDS(dwords) + IOR_SET_OFFSET(set_id);
+ addr = BYTES_TO_DWORDS(storm->sem_fast_mem_addr +
+ SEM_FAST_REG_STORM_REG_FILE) +
+ IOR_SET_OFFSET(set_id);
buf[strlen(buf) - 1] = '0' + set_id;
offset += qed_grc_dump_mem(p_hwfn,
p_ptt,
@@ -3074,6 +3367,7 @@ static u32 qed_grc_dump_iors(struct qed_hwfn *p_hwfn,
buf,
addr,
IORS_PER_SET,
+ false,
32,
false,
"ior",
@@ -3091,10 +3385,10 @@ static u32 qed_grc_dump_vfc_cam(struct qed_hwfn *p_hwfn,
u32 *dump_buf, bool dump, u8 storm_id)
{
u32 total_size = VFC_CAM_NUM_ROWS * VFC_CAM_RESP_DWORDS;
+ struct storm_defs *storm = &s_storm_defs[storm_id];
u32 cam_addr[VFC_CAM_ADDR_DWORDS] = { 0 };
u32 cam_cmd[VFC_CAM_CMD_DWORDS] = { 0 };
- u32 offset = 0;
- u32 row, i;
+ u32 row, i, offset = 0;
offset += qed_grc_dump_mem_hdr(p_hwfn,
dump_buf + offset,
@@ -3103,38 +3397,34 @@ static u32 qed_grc_dump_vfc_cam(struct qed_hwfn *p_hwfn,
0,
total_size,
256,
- false,
- "vfc_cam",
- true, s_storm_defs[storm_id].letter);
- if (dump) {
- /* Prepare CAM address */
- SET_VAR_FIELD(cam_addr, VFC_CAM_ADDR, OP, VFC_OPCODE_CAM_RD);
- for (row = 0; row < VFC_CAM_NUM_ROWS;
- row++, offset += VFC_CAM_RESP_DWORDS) {
- /* Write VFC CAM command */
- SET_VAR_FIELD(cam_cmd, VFC_CAM_CMD, ROW, row);
- ARR_REG_WR(p_hwfn,
- p_ptt,
- s_storm_defs[storm_id].sem_fast_mem_addr +
- SEM_FAST_REG_VFC_DATA_WR,
- cam_cmd, VFC_CAM_CMD_DWORDS);
+ false, "vfc_cam", true, storm->letter);
- /* Write VFC CAM address */
- ARR_REG_WR(p_hwfn,
- p_ptt,
- s_storm_defs[storm_id].sem_fast_mem_addr +
- SEM_FAST_REG_VFC_ADDR,
- cam_addr, VFC_CAM_ADDR_DWORDS);
+ if (!dump)
+ return offset + total_size;
- /* Read VFC CAM read response */
- ARR_REG_RD(p_hwfn,
- p_ptt,
- s_storm_defs[storm_id].sem_fast_mem_addr +
- SEM_FAST_REG_VFC_DATA_RD,
- dump_buf + offset, VFC_CAM_RESP_DWORDS);
- }
- } else {
- offset += total_size;
+ /* Prepare CAM address */
+ SET_VAR_FIELD(cam_addr, VFC_CAM_ADDR, OP, VFC_OPCODE_CAM_RD);
+
+ for (row = 0; row < VFC_CAM_NUM_ROWS;
+ row++, offset += VFC_CAM_RESP_DWORDS) {
+ /* Write VFC CAM command */
+ SET_VAR_FIELD(cam_cmd, VFC_CAM_CMD, ROW, row);
+ ARR_REG_WR(p_hwfn,
+ p_ptt,
+ storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_WR,
+ cam_cmd, VFC_CAM_CMD_DWORDS);
+
+ /* Write VFC CAM address */
+ ARR_REG_WR(p_hwfn,
+ p_ptt,
+ storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_ADDR,
+ cam_addr, VFC_CAM_ADDR_DWORDS);
+
+ /* Read VFC CAM read response */
+ ARR_REG_RD(p_hwfn,
+ p_ptt,
+ storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_RD,
+ dump_buf + offset, VFC_CAM_RESP_DWORDS);
}
return offset;
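
Each VFC CAM row is fetched with the three-step sequence above: write the command array, write the address array, read the response dwords. A condensed model with stubbed array I/O; arr_write/arr_read stand in for ARR_REG_WR/ARR_REG_RD and the dword counts are illustrative, not the driver's VFC_CAM_* values:

	#include <stdint.h>

	void arr_write(uint32_t addr, const uint32_t *buf, uint32_t dwords);
	void arr_read(uint32_t addr, uint32_t *buf, uint32_t dwords);

	#define CAM_CMD_DWORDS	4	/* illustrative sizes */
	#define CAM_ADDR_DWORDS	4
	#define CAM_RESP_DWORDS	4

	static void read_cam_row(uint32_t data_wr, uint32_t addr_wr,
				 uint32_t data_rd, const uint32_t *cmd,
				 const uint32_t *addr, uint32_t *resp)
	{
		arr_write(data_wr, cmd, CAM_CMD_DWORDS);	/* command */
		arr_write(addr_wr, addr, CAM_ADDR_DWORDS);	/* address */
		arr_read(data_rd, resp, CAM_RESP_DWORDS);	/* response */
	}
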
@@ -3148,10 +3438,10 @@ static u32 qed_grc_dump_vfc_ram(struct qed_hwfn *p_hwfn,
u8 storm_id, struct vfc_ram_defs *ram_defs)
{
u32 total_size = ram_defs->num_rows * VFC_RAM_RESP_DWORDS;
+ struct storm_defs *storm = &s_storm_defs[storm_id];
u32 ram_addr[VFC_RAM_ADDR_DWORDS] = { 0 };
u32 ram_cmd[VFC_RAM_CMD_DWORDS] = { 0 };
- u32 offset = 0;
- u32 row, i;
+ u32 row, i, offset = 0;
offset += qed_grc_dump_mem_hdr(p_hwfn,
dump_buf + offset,
@@ -3162,7 +3452,7 @@ static u32 qed_grc_dump_vfc_ram(struct qed_hwfn *p_hwfn,
256,
false,
ram_defs->type_name,
- true, s_storm_defs[storm_id].letter);
+ true, storm->letter);
/* Prepare RAM address */
SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, OP, VFC_OPCODE_RAM_RD);
@@ -3176,23 +3466,20 @@ static u32 qed_grc_dump_vfc_ram(struct qed_hwfn *p_hwfn,
/* Write VFC RAM command */
ARR_REG_WR(p_hwfn,
p_ptt,
- s_storm_defs[storm_id].sem_fast_mem_addr +
- SEM_FAST_REG_VFC_DATA_WR,
+ storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_WR,
ram_cmd, VFC_RAM_CMD_DWORDS);
/* Write VFC RAM address */
SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, ROW, row);
ARR_REG_WR(p_hwfn,
p_ptt,
- s_storm_defs[storm_id].sem_fast_mem_addr +
- SEM_FAST_REG_VFC_ADDR,
+ storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_ADDR,
ram_addr, VFC_RAM_ADDR_DWORDS);
/* Read VFC RAM read response */
ARR_REG_RD(p_hwfn,
p_ptt,
- s_storm_defs[storm_id].sem_fast_mem_addr +
- SEM_FAST_REG_VFC_DATA_RD,
+ storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_RD,
dump_buf + offset, VFC_RAM_RESP_DWORDS);
}
@@ -3208,28 +3495,27 @@ static u32 qed_grc_dump_vfc(struct qed_hwfn *p_hwfn,
u32 offset = 0;
for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
- if (qed_grc_is_storm_included(p_hwfn,
- (enum dbg_storms)storm_id) &&
- s_storm_defs[storm_id].has_vfc &&
- (storm_id != DBG_PSTORM_ID ||
- dev_data->platform_id == PLATFORM_ASIC)) {
- /* Read CAM */
- offset += qed_grc_dump_vfc_cam(p_hwfn,
+ if (!qed_grc_is_storm_included(p_hwfn,
+ (enum dbg_storms)storm_id) ||
+ !s_storm_defs[storm_id].has_vfc ||
+ (storm_id == DBG_PSTORM_ID && dev_data->platform_id !=
+ PLATFORM_ASIC))
+ continue;
+
+ /* Read CAM */
+ offset += qed_grc_dump_vfc_cam(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump, storm_id);
+
+ /* Read RAM */
+ for (i = 0; i < NUM_VFC_RAM_TYPES; i++)
+ offset += qed_grc_dump_vfc_ram(p_hwfn,
p_ptt,
dump_buf + offset,
- dump, storm_id);
-
- /* Read RAM */
- for (i = 0; i < NUM_VFC_RAM_TYPES; i++)
- offset += qed_grc_dump_vfc_ram(p_hwfn,
- p_ptt,
- dump_buf +
- offset,
- dump,
- storm_id,
- &s_vfc_ram_defs
- [i]);
- }
+ dump,
+ storm_id,
+ &s_vfc_ram_defs[i]);
}
return offset;
@@ -3244,14 +3530,17 @@ static u32 qed_grc_dump_rss(struct qed_hwfn *p_hwfn,
u8 rss_mem_id;
for (rss_mem_id = 0; rss_mem_id < NUM_RSS_MEM_TYPES; rss_mem_id++) {
- struct rss_mem_defs *rss_defs = &s_rss_mem_defs[rss_mem_id];
- u32 num_entries = rss_defs->num_entries[dev_data->chip_id];
- u32 entry_width = rss_defs->entry_width[dev_data->chip_id];
- u32 total_dwords = (num_entries * entry_width) / 32;
- u32 size = RSS_REG_RSS_RAM_DATA_SIZE;
- bool packed = (entry_width == 16);
- u32 rss_addr = rss_defs->addr;
- u32 i, addr;
+ u32 rss_addr, num_entries, entry_width, total_dwords, i;
+ struct rss_mem_defs *rss_defs;
+ u32 addr, size;
+ bool packed;
+
+ rss_defs = &s_rss_mem_defs[rss_mem_id];
+ rss_addr = rss_defs->addr;
+ num_entries = rss_defs->num_entries[dev_data->chip_id];
+ entry_width = rss_defs->entry_width[dev_data->chip_id];
+ total_dwords = (num_entries * entry_width) / 32;
+ packed = (entry_width == 16);
offset += qed_grc_dump_mem_hdr(p_hwfn,
dump_buf + offset,
@@ -3263,23 +3552,23 @@ static u32 qed_grc_dump_rss(struct qed_hwfn *p_hwfn,
packed,
rss_defs->type_name, false, 0);
+ /* Dump RSS data */
if (!dump) {
offset += total_dwords;
continue;
}
- /* Dump RSS data */
- for (i = 0; i < total_dwords;
- i += RSS_REG_RSS_RAM_DATA_SIZE, rss_addr++) {
- addr = BYTES_TO_DWORDS(RSS_REG_RSS_RAM_DATA);
+ addr = BYTES_TO_DWORDS(RSS_REG_RSS_RAM_DATA);
+ size = RSS_REG_RSS_RAM_DATA_SIZE;
+ for (i = 0; i < total_dwords; i += size, rss_addr++) {
qed_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_ADDR, rss_addr);
- offset += qed_grc_dump_addr_range(p_hwfn,
- p_ptt,
- dump_buf +
- offset,
- dump,
- addr,
- size);
+ offset += qed_grc_dump_addr_range(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ addr,
+ size,
+ false);
}
}
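
RSS RAM is another indirect memory: a row index is written to RSS_REG_RSS_RAM_ADDR, and RSS_REG_RSS_RAM_DATA_SIZE dwords are then read from the data window, one row per iteration. The loop shape in isolation (stubbed I/O; read_range stands in for qed_grc_dump_addr_range):

	#include <stdint.h>

	void reg_write(uint32_t addr, uint32_t val);
	uint32_t read_range(uint32_t *buf, uint32_t addr, uint32_t dwords);

	/* One address write exposes one row of 'row_dwords' dwords. */
	static uint32_t dump_rss(uint32_t *buf, uint32_t ram_addr_reg,
				 uint32_t data_addr, uint32_t row_dwords,
				 uint32_t total_dwords, uint32_t first_row)
	{
		uint32_t i, row = first_row, offset = 0;

		for (i = 0; i < total_dwords; i += row_dwords, row++) {
			reg_write(ram_addr_reg, row);
			offset += read_range(buf + offset, data_addr,
					     row_dwords);
		}

		return offset;
	}
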
@@ -3316,10 +3605,11 @@ static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn,
BIG_RAM_BLOCK_SIZE_BYTES * 8,
false, type_name, false, 0);
+ /* Read and dump Big RAM data */
if (!dump)
return offset + ram_size;
- /* Read and dump Big RAM data */
+ /* Dump Big RAM */
for (i = 0; i < total_blocks / 2; i++) {
u32 addr, len;
@@ -3331,7 +3621,8 @@ static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn,
dump_buf + offset,
dump,
addr,
- len);
+ len,
+ false);
}
return offset;
@@ -3359,7 +3650,7 @@ static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn,
NULL,
BYTES_TO_DWORDS(MCP_REG_SCRATCH),
MCP_REG_SCRATCH_SIZE,
- 0, false, "MCP", false, 0);
+ false, 0, false, "MCP", false, 0);
/* Dump MCP cpu_reg_file */
offset += qed_grc_dump_mem(p_hwfn,
@@ -3369,7 +3660,7 @@ static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn,
NULL,
BYTES_TO_DWORDS(MCP_REG_CPU_REG_FILE),
MCP_REG_CPU_REG_FILE_SIZE,
- 0, false, "MCP", false, 0);
+ false, 0, false, "MCP", false, 0);
/* Dump MCP registers */
block_enable[BLOCK_MCP] = true;
@@ -3387,11 +3678,13 @@ static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn,
dump_buf + offset,
dump,
addr,
- 1);
+ 1,
+ false);
/* Release MCP */
if (halted && qed_mcp_resume(p_hwfn, p_ptt))
DP_NOTICE(p_hwfn, "Failed to resume MCP after halt!\n");
+
return offset;
}
@@ -3404,14 +3697,26 @@ static u32 qed_grc_dump_phy(struct qed_hwfn *p_hwfn,
u8 phy_id;
for (phy_id = 0; phy_id < ARRAY_SIZE(s_phy_defs); phy_id++) {
- struct phy_defs *phy_defs = &s_phy_defs[phy_id];
- int printed_chars;
-
- printed_chars = snprintf(mem_name, sizeof(mem_name), "tbus_%s",
- phy_defs->phy_name);
- if (printed_chars < 0 || printed_chars >= sizeof(mem_name))
+ u32 addr_lo_addr, addr_hi_addr, data_lo_addr, data_hi_addr;
+ struct phy_defs *phy_defs;
+ u8 *bytes_buf;
+
+ phy_defs = &s_phy_defs[phy_id];
+ addr_lo_addr = phy_defs->base_addr +
+ phy_defs->tbus_addr_lo_addr;
+ addr_hi_addr = phy_defs->base_addr +
+ phy_defs->tbus_addr_hi_addr;
+ data_lo_addr = phy_defs->base_addr +
+ phy_defs->tbus_data_lo_addr;
+ data_hi_addr = phy_defs->base_addr +
+ phy_defs->tbus_data_hi_addr;
+ bytes_buf = (u8 *)(dump_buf + offset);
+
+ if (snprintf(mem_name, sizeof(mem_name), "tbus_%s",
+ phy_defs->phy_name) < 0)
DP_NOTICE(p_hwfn,
"Unexpected debug error: invalid PHY memory name\n");
+
offset += qed_grc_dump_mem_hdr(p_hwfn,
dump_buf + offset,
dump,
@@ -3419,34 +3724,26 @@ static u32 qed_grc_dump_phy(struct qed_hwfn *p_hwfn,
0,
PHY_DUMP_SIZE_DWORDS,
16, true, mem_name, false, 0);
- if (dump) {
- u32 addr_lo_addr = phy_defs->base_addr +
- phy_defs->tbus_addr_lo_addr;
- u32 addr_hi_addr = phy_defs->base_addr +
- phy_defs->tbus_addr_hi_addr;
- u32 data_lo_addr = phy_defs->base_addr +
- phy_defs->tbus_data_lo_addr;
- u32 data_hi_addr = phy_defs->base_addr +
- phy_defs->tbus_data_hi_addr;
- u8 *bytes_buf = (u8 *)(dump_buf + offset);
-
- for (tbus_hi_offset = 0;
- tbus_hi_offset < (NUM_PHY_TBUS_ADDRESSES >> 8);
- tbus_hi_offset++) {
+
+ if (!dump) {
+ offset += PHY_DUMP_SIZE_DWORDS;
+ continue;
+ }
+
+ for (tbus_hi_offset = 0;
+ tbus_hi_offset < (NUM_PHY_TBUS_ADDRESSES >> 8);
+ tbus_hi_offset++) {
+ qed_wr(p_hwfn, p_ptt, addr_hi_addr, tbus_hi_offset);
+ for (tbus_lo_offset = 0; tbus_lo_offset < 256;
+ tbus_lo_offset++) {
qed_wr(p_hwfn,
- p_ptt, addr_hi_addr, tbus_hi_offset);
- for (tbus_lo_offset = 0; tbus_lo_offset < 256;
- tbus_lo_offset++) {
- qed_wr(p_hwfn,
- p_ptt,
- addr_lo_addr, tbus_lo_offset);
- *(bytes_buf++) =
- (u8)qed_rd(p_hwfn, p_ptt,
- data_lo_addr);
- *(bytes_buf++) =
- (u8)qed_rd(p_hwfn, p_ptt,
- data_hi_addr);
- }
+ p_ptt, addr_lo_addr, tbus_lo_offset);
+ *(bytes_buf++) = (u8)qed_rd(p_hwfn,
+ p_ptt,
+ data_lo_addr);
+ *(bytes_buf++) = (u8)qed_rd(p_hwfn,
+ p_ptt,
+ data_hi_addr);
}
}
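
The PHY TBUS walk above yields two bytes per low-address write: the 8-bit high offset is latched first, then for each of the 256 low offsets the lo and hi data registers each supply one byte. A stubbed sketch of the same sweep:

	#include <stdint.h>

	void reg_write(uint32_t addr, uint32_t val);
	uint32_t reg_read(uint32_t addr);

	/* Fills buf with 2 bytes per TBUS address; num_addrs is assumed
	 * to be a multiple of 256 (one full low sweep per high offset).
	 */
	static void dump_tbus(uint8_t *buf, uint32_t addr_lo, uint32_t addr_hi,
			      uint32_t data_lo, uint32_t data_hi,
			      uint32_t num_addrs)
	{
		uint32_t hi, lo;

		for (hi = 0; hi < (num_addrs >> 8); hi++) {
			reg_write(addr_hi, hi);
			for (lo = 0; lo < 256; lo++) {
				reg_write(addr_lo, lo);
				*(buf++) = (uint8_t)reg_read(data_lo);
				*(buf++) = (uint8_t)reg_read(data_hi);
			}
		}
	}
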
@@ -3460,16 +3757,17 @@ static void qed_config_dbg_line(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
enum block_id block_id,
u8 line_id,
- u8 cycle_en,
- u8 right_shift, u8 force_valid, u8 force_frame)
+ u8 enable_mask,
+ u8 right_shift,
+ u8 force_valid_mask, u8 force_frame_mask)
{
- struct block_defs *p_block_defs = s_block_defs[block_id];
+ struct block_defs *block = s_block_defs[block_id];
- qed_wr(p_hwfn, p_ptt, p_block_defs->dbg_select_addr, line_id);
- qed_wr(p_hwfn, p_ptt, p_block_defs->dbg_cycle_enable_addr, cycle_en);
- qed_wr(p_hwfn, p_ptt, p_block_defs->dbg_shift_addr, right_shift);
- qed_wr(p_hwfn, p_ptt, p_block_defs->dbg_force_valid_addr, force_valid);
- qed_wr(p_hwfn, p_ptt, p_block_defs->dbg_force_frame_addr, force_frame);
+ qed_wr(p_hwfn, p_ptt, block->dbg_select_addr, line_id);
+ qed_wr(p_hwfn, p_ptt, block->dbg_enable_addr, enable_mask);
+ qed_wr(p_hwfn, p_ptt, block->dbg_shift_addr, right_shift);
+ qed_wr(p_hwfn, p_ptt, block->dbg_force_valid_addr, force_valid_mask);
+ qed_wr(p_hwfn, p_ptt, block->dbg_force_frame_addr, force_frame_mask);
}
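
Configuring one debug line is simply five writes to the block's select/enable/shift/force registers, as above; the static-debug loop below then drives it with enable_mask 0xf and reads STATIC_DEBUG_LINE_DWORDS from the calendar output per line. A stubbed equivalent (the struct mirrors the relevant block_defs fields and is an assumption of this sketch):

	#include <stdint.h>

	void reg_write(uint32_t addr, uint32_t val);

	struct dbg_block_regs {	/* assumed mirror of block_defs fields */
		uint32_t select, enable, shift, force_valid, force_frame;
	};

	static void config_dbg_line(const struct dbg_block_regs *b,
				    uint8_t line, uint8_t enable_mask,
				    uint8_t right_shift,
				    uint8_t force_valid_mask,
				    uint8_t force_frame_mask)
	{
		reg_write(b->select, line);
		reg_write(b->enable, enable_mask);
		reg_write(b->shift, right_shift);
		reg_write(b->force_valid, force_valid_mask);
		reg_write(b->force_frame, force_frame_mask);
	}
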
/* Dumps Static Debug data. Returns the dumped size in dwords. */
@@ -3477,10 +3775,12 @@ static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *dump_buf, bool dump)
{
- u32 block_dwords = NUM_DBG_BUS_LINES * STATIC_DEBUG_LINE_DWORDS;
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
- u32 offset = 0, block_id, line_id;
- struct block_defs *p_block_defs;
+ u32 block_id, line_id, offset = 0;
+
+ /* Skip static debug if a debug bus recording is in progress */
+ if (qed_rd(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON))
+ return 0;
if (dump) {
DP_VERBOSE(p_hwfn,
@@ -3488,11 +3788,11 @@ static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn,
/* Disable all blocks debug output */
for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
- p_block_defs = s_block_defs[block_id];
+ struct block_defs *block = s_block_defs[block_id];
- if (p_block_defs->has_dbg_bus[dev_data->chip_id])
- qed_wr(p_hwfn, p_ptt,
- p_block_defs->dbg_cycle_enable_addr, 0);
+ if (block->has_dbg_bus[dev_data->chip_id])
+ qed_wr(p_hwfn, p_ptt, block->dbg_enable_addr,
+ 0);
}
qed_bus_reset_dbg_block(p_hwfn, p_ptt);
@@ -3506,59 +3806,71 @@ static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn,
/* Dump all static debug lines for each relevant block */
for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
- p_block_defs = s_block_defs[block_id];
+ struct block_defs *block = s_block_defs[block_id];
+ struct dbg_bus_block *block_desc;
+ u32 block_dwords, addr, len;
+ u8 dbg_client_id;
- if (!p_block_defs->has_dbg_bus[dev_data->chip_id])
+ if (!block->has_dbg_bus[dev_data->chip_id])
continue;
+ block_desc =
+ get_dbg_bus_block_desc(p_hwfn,
+ (enum block_id)block_id);
+ block_dwords = NUM_DBG_LINES(block_desc) *
+ STATIC_DEBUG_LINE_DWORDS;
+
/* Dump static section params */
offset += qed_grc_dump_mem_hdr(p_hwfn,
dump_buf + offset,
dump,
- p_block_defs->name, 0,
- block_dwords, 32, false,
- "STATIC", false, 0);
-
- if (dump && !dev_data->block_in_reset[block_id]) {
- u8 dbg_client_id =
- p_block_defs->dbg_client_id[dev_data->chip_id];
- u32 addr = BYTES_TO_DWORDS(DBG_REG_CALENDAR_OUT_DATA);
- u32 len = STATIC_DEBUG_LINE_DWORDS;
-
- /* Enable block's client */
- qed_bus_enable_clients(p_hwfn, p_ptt,
- BIT(dbg_client_id));
-
- for (line_id = 0; line_id < NUM_DBG_BUS_LINES;
- line_id++) {
- /* Configure debug line ID */
- qed_config_dbg_line(p_hwfn,
- p_ptt,
- (enum block_id)block_id,
- (u8)line_id,
- 0xf, 0, 0, 0);
+ block->name,
+ 0,
+ block_dwords,
+ 32, false, "STATIC", false, 0);
- /* Read debug line info */
- offset +=
- qed_grc_dump_addr_range(p_hwfn,
- p_ptt,
- dump_buf + offset,
- dump,
- addr,
- len);
- }
+ if (!dump) {
+ offset += block_dwords;
+ continue;
+ }
- /* Disable block's client and debug output */
- qed_bus_enable_clients(p_hwfn, p_ptt, 0);
- qed_wr(p_hwfn, p_ptt,
- p_block_defs->dbg_cycle_enable_addr, 0);
- } else {
- /* All lines are invalid - dump zeros */
- if (dump)
- memset(dump_buf + offset, 0,
- DWORDS_TO_BYTES(block_dwords));
+ /* If all lines are invalid - dump zeros */
+ if (dev_data->block_in_reset[block_id]) {
+ memset(dump_buf + offset, 0,
+ DWORDS_TO_BYTES(block_dwords));
offset += block_dwords;
+ continue;
}
+
+ /* Enable block's client */
+ dbg_client_id = block->dbg_client_id[dev_data->chip_id];
+ qed_bus_enable_clients(p_hwfn,
+ p_ptt,
+ BIT(dbg_client_id));
+
+ addr = BYTES_TO_DWORDS(DBG_REG_CALENDAR_OUT_DATA);
+ len = STATIC_DEBUG_LINE_DWORDS;
+ for (line_id = 0; line_id < (u32)NUM_DBG_LINES(block_desc);
+ line_id++) {
+ /* Configure debug line ID */
+ qed_config_dbg_line(p_hwfn,
+ p_ptt,
+ (enum block_id)block_id,
+ (u8)line_id, 0xf, 0, 0, 0);
+
+ /* Read debug line info */
+ offset += qed_grc_dump_addr_range(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ addr,
+ len,
+ true);
+ }
+
+ /* Disable block's client and debug output */
+ qed_bus_enable_clients(p_hwfn, p_ptt, 0);
+ qed_wr(p_hwfn, p_ptt, block->dbg_enable_addr, 0);
}
if (dump) {
@@ -3584,8 +3896,8 @@ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn,
*num_dumped_dwords = 0;
- /* Find port mode */
if (dump) {
+ /* Find port mode */
switch (qed_rd(p_hwfn, p_ptt, MISC_REG_PORT_MODE)) {
case 0:
port_mode = 1;
@@ -3597,11 +3909,10 @@ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn,
port_mode = 4;
break;
}
- }
- /* Update reset state */
- if (dump)
+ /* Update reset state */
qed_update_blocks_reset_state(p_hwfn, p_ptt);
+ }
/* Dump global params */
offset += qed_dump_common_global_params(p_hwfn,
@@ -3635,7 +3946,8 @@ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn,
}
/* Disable all parities using MFW command */
- if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP)) {
+ if (dump &&
+ !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP)) {
parities_masked = !qed_mcp_mask_parities(p_hwfn, p_ptt, 1);
if (!parities_masked) {
DP_NOTICE(p_hwfn,
@@ -3661,9 +3973,9 @@ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn,
/* Dump all regs */
if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS)) {
- /* Dump all blocks except MCP */
bool block_enable[MAX_BLOCK_ID];
+ /* Dump all blocks except MCP */
for (i = 0; i < MAX_BLOCK_ID; i++)
block_enable[i] = true;
block_enable[BLOCK_MCP] = false;
@@ -3732,7 +4044,8 @@ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn,
dump_buf + offset, dump);
/* Dump last section */
- offset += qed_dump_last_section(dump_buf, offset, dump);
+ offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump);
+
if (dump) {
/* Unstall storms */
if (qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_UNSTALL))
@@ -3763,19 +4076,20 @@ static u32 qed_idle_chk_dump_failure(struct qed_hwfn *p_hwfn,
const struct dbg_idle_chk_rule *rule,
u16 fail_entry_id, u32 *cond_reg_values)
{
- const union dbg_idle_chk_reg *regs = &((const union dbg_idle_chk_reg *)
- s_dbg_arrays
- [BIN_BUF_DBG_IDLE_CHK_REGS].
- ptr)[rule->reg_offset];
- const struct dbg_idle_chk_cond_reg *cond_regs = &regs[0].cond_reg;
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
- struct dbg_idle_chk_result_hdr *hdr =
- (struct dbg_idle_chk_result_hdr *)dump_buf;
- const struct dbg_idle_chk_info_reg *info_regs =
- &regs[rule->num_cond_regs].info_reg;
- u32 next_reg_offset = 0, i, offset = 0;
+ const struct dbg_idle_chk_cond_reg *cond_regs;
+ const struct dbg_idle_chk_info_reg *info_regs;
+ u32 i, next_reg_offset = 0, offset = 0;
+ struct dbg_idle_chk_result_hdr *hdr;
+ const union dbg_idle_chk_reg *regs;
u8 reg_id;
+ hdr = (struct dbg_idle_chk_result_hdr *)dump_buf;
+ regs = &((const union dbg_idle_chk_reg *)
+ s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr)[rule->reg_offset];
+ cond_regs = &regs[0].cond_reg;
+ info_regs = &regs[rule->num_cond_regs].info_reg;
+
/* Dump rule data */
if (dump) {
memset(hdr, 0, sizeof(*hdr));
@@ -3790,33 +4104,31 @@ static u32 qed_idle_chk_dump_failure(struct qed_hwfn *p_hwfn,
/* Dump condition register values */
for (reg_id = 0; reg_id < rule->num_cond_regs; reg_id++) {
const struct dbg_idle_chk_cond_reg *reg = &cond_regs[reg_id];
+ struct dbg_idle_chk_result_reg_hdr *reg_hdr;
- /* Write register header */
- if (dump) {
- struct dbg_idle_chk_result_reg_hdr *reg_hdr =
- (struct dbg_idle_chk_result_reg_hdr *)(dump_buf
- + offset);
- offset += IDLE_CHK_RESULT_REG_HDR_DWORDS;
- memset(reg_hdr, 0,
- sizeof(struct dbg_idle_chk_result_reg_hdr));
- reg_hdr->start_entry = reg->start_entry;
- reg_hdr->size = reg->entry_size;
- SET_FIELD(reg_hdr->data,
- DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM,
- reg->num_entries > 1 || reg->start_entry > 0
- ? 1 : 0);
- SET_FIELD(reg_hdr->data,
- DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID, reg_id);
+ reg_hdr = (struct dbg_idle_chk_result_reg_hdr *)
+ (dump_buf + offset);
- /* Write register values */
- for (i = 0; i < reg_hdr->size;
- i++, next_reg_offset++, offset++)
- dump_buf[offset] =
- cond_reg_values[next_reg_offset];
- } else {
+ /* Write register header */
+ if (!dump) {
offset += IDLE_CHK_RESULT_REG_HDR_DWORDS +
reg->entry_size;
+ continue;
}
+
+ offset += IDLE_CHK_RESULT_REG_HDR_DWORDS;
+ memset(reg_hdr, 0, sizeof(*reg_hdr));
+ reg_hdr->start_entry = reg->start_entry;
+ reg_hdr->size = reg->entry_size;
+ SET_FIELD(reg_hdr->data,
+ DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM,
+ reg->num_entries > 1 || reg->start_entry > 0 ? 1 : 0);
+ SET_FIELD(reg_hdr->data,
+ DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID, reg_id);
+
+ /* Write register values */
+ for (i = 0; i < reg_hdr->size; i++, next_reg_offset++, offset++)
+ dump_buf[offset] = cond_reg_values[next_reg_offset];
}
/* Dump info register values */
@@ -3824,12 +4136,12 @@ static u32 qed_idle_chk_dump_failure(struct qed_hwfn *p_hwfn,
const struct dbg_idle_chk_info_reg *reg = &info_regs[reg_id];
u32 block_id;
+ /* Check if register's block is in reset */
if (!dump) {
offset += IDLE_CHK_RESULT_REG_HDR_DWORDS + reg->size;
continue;
}
- /* Check if register's block is in reset */
block_id = GET_FIELD(reg->data, DBG_IDLE_CHK_INFO_REG_BLOCK_ID);
if (block_id >= MAX_BLOCK_ID) {
DP_NOTICE(p_hwfn, "Invalid block_id\n");
@@ -3837,47 +4149,50 @@ static u32 qed_idle_chk_dump_failure(struct qed_hwfn *p_hwfn,
}
if (!dev_data->block_in_reset[block_id]) {
- bool eval_mode = GET_FIELD(reg->mode.data,
- DBG_MODE_HDR_EVAL_MODE) > 0;
- bool mode_match = true;
+ struct dbg_idle_chk_result_reg_hdr *reg_hdr;
+ bool wide_bus, eval_mode, mode_match = true;
+ u16 modes_buf_offset;
+ u32 addr;
+
+ reg_hdr = (struct dbg_idle_chk_result_reg_hdr *)
+ (dump_buf + offset);
/* Check mode */
+ eval_mode = GET_FIELD(reg->mode.data,
+ DBG_MODE_HDR_EVAL_MODE) > 0;
if (eval_mode) {
- u16 modes_buf_offset =
- GET_FIELD(reg->mode.data,
- DBG_MODE_HDR_MODES_BUF_OFFSET);
+ modes_buf_offset =
+ GET_FIELD(reg->mode.data,
+ DBG_MODE_HDR_MODES_BUF_OFFSET);
mode_match =
qed_is_mode_match(p_hwfn,
&modes_buf_offset);
}
- if (mode_match) {
- u32 addr =
- GET_FIELD(reg->data,
- DBG_IDLE_CHK_INFO_REG_ADDRESS);
-
- /* Write register header */
- struct dbg_idle_chk_result_reg_hdr *reg_hdr =
- (struct dbg_idle_chk_result_reg_hdr *)
- (dump_buf + offset);
-
- offset += IDLE_CHK_RESULT_REG_HDR_DWORDS;
- hdr->num_dumped_info_regs++;
- memset(reg_hdr, 0, sizeof(*reg_hdr));
- reg_hdr->size = reg->size;
- SET_FIELD(reg_hdr->data,
- DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID,
- rule->num_cond_regs + reg_id);
-
- /* Write register values */
- offset +=
- qed_grc_dump_addr_range(p_hwfn,
- p_ptt,
- dump_buf + offset,
- dump,
- addr,
- reg->size);
- }
+ if (!mode_match)
+ continue;
+
+ addr = GET_FIELD(reg->data,
+ DBG_IDLE_CHK_INFO_REG_ADDRESS);
+ wide_bus = GET_FIELD(reg->data,
+ DBG_IDLE_CHK_INFO_REG_WIDE_BUS);
+
+ /* Write register header */
+ offset += IDLE_CHK_RESULT_REG_HDR_DWORDS;
+ hdr->num_dumped_info_regs++;
+ memset(reg_hdr, 0, sizeof(*reg_hdr));
+ reg_hdr->size = reg->size;
+ SET_FIELD(reg_hdr->data,
+ DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID,
+ rule->num_cond_regs + reg_id);
+
+ /* Write register values */
+ offset += qed_grc_dump_addr_range(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ addr,
+ reg->size, wide_bus);
}
}
@@ -3898,6 +4213,7 @@ qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
u8 reg_id;
*num_failing_rules = 0;
+
for (i = 0; i < num_input_rules; i++) {
const struct dbg_idle_chk_cond_reg *cond_regs;
const struct dbg_idle_chk_rule *rule;
@@ -3920,8 +4236,9 @@ qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
*/
for (reg_id = 0; reg_id < rule->num_cond_regs && check_rule;
reg_id++) {
- u32 block_id = GET_FIELD(cond_regs[reg_id].data,
- DBG_IDLE_CHK_COND_REG_BLOCK_ID);
+ u32 block_id =
+ GET_FIELD(cond_regs[reg_id].data,
+ DBG_IDLE_CHK_COND_REG_BLOCK_ID);
if (block_id >= MAX_BLOCK_ID) {
DP_NOTICE(p_hwfn, "Invalid block_id\n");
@@ -3936,48 +4253,47 @@ qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
if (!check_rule && dump)
continue;
- if (!dump) {
- u32 entry_dump_size =
- qed_idle_chk_dump_failure(p_hwfn,
- p_ptt,
- dump_buf + offset,
- false,
- rule->rule_id,
- rule,
- 0,
- NULL);
-
- offset += num_reg_entries * entry_dump_size;
- (*num_failing_rules) += num_reg_entries;
- continue;
- }
-
/* Go over all register entries (number of entries is the same
* for all condition registers).
*/
for (entry_id = 0; entry_id < num_reg_entries; entry_id++) {
- /* Read current entry of all condition registers */
u32 next_reg_offset = 0;
+ if (!dump) {
+ offset += qed_idle_chk_dump_failure(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ false,
+ rule->rule_id,
+ rule,
+ entry_id,
+ NULL);
+ (*num_failing_rules)++;
+ break;
+ }
+
+ /* Read current entry of all condition registers */
for (reg_id = 0; reg_id < rule->num_cond_regs;
reg_id++) {
const struct dbg_idle_chk_cond_reg *reg =
- &cond_regs[reg_id];
+ &cond_regs[reg_id];
+ u32 padded_entry_size, addr;
+ bool wide_bus;
- /* Find GRC address (if it's a memory,the
+ /* Find GRC address (if it's a memory, the
* address of the specific entry is calculated).
*/
- u32 addr =
+ addr = GET_FIELD(reg->data,
+ DBG_IDLE_CHK_COND_REG_ADDRESS);
+ wide_bus =
GET_FIELD(reg->data,
- DBG_IDLE_CHK_COND_REG_ADDRESS);
-
+ DBG_IDLE_CHK_COND_REG_WIDE_BUS);
if (reg->num_entries > 1 ||
reg->start_entry > 0) {
- u32 padded_entry_size =
- reg->entry_size > 1 ?
- roundup_pow_of_two(reg->entry_size) :
- 1;
-
+ padded_entry_size =
+ reg->entry_size > 1 ?
+						roundup_pow_of_two(reg->entry_size) : 1;
addr += (reg->start_entry + entry_id) *
padded_entry_size;
}
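
For a condition register that maps a memory (num_entries > 1 or start_entry > 0), the entry's GRC address above is the base address plus an entry stride rounded up to a power of two. The same math in isolation; roundup_pow2 mirrors the kernel's roundup_pow_of_two for u32 inputs:

	#include <stdint.h>

	static uint32_t roundup_pow2(uint32_t x)
	{
		uint32_t p = 1;

		while (p < x)
			p <<= 1;

		return p;
	}

	static uint32_t cond_reg_entry_addr(uint32_t base,
					    uint32_t start_entry,
					    uint32_t entry_id,
					    uint32_t entry_size)
	{
		uint32_t padded = entry_size > 1 ?
				  roundup_pow2(entry_size) : 1;

		return base + (start_entry + entry_id) * padded;
	}
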
@@ -3991,28 +4307,27 @@ qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
}
next_reg_offset +=
- qed_grc_dump_addr_range(p_hwfn,
- p_ptt,
+ qed_grc_dump_addr_range(p_hwfn, p_ptt,
cond_reg_values +
next_reg_offset,
dump, addr,
- reg->entry_size);
+ reg->entry_size,
+ wide_bus);
}
- /* Call rule's condition function - a return value of
- * true indicates failure.
+ /* Call rule condition function.
+			 * If it returns true, it's a failure.
*/
- if ((*cond_arr[rule->cond_id])(cond_reg_values,
- imm_values)) {
- offset +=
- qed_idle_chk_dump_failure(p_hwfn,
- p_ptt,
- dump_buf + offset,
- dump,
- rule->rule_id,
- rule,
- entry_id,
- cond_reg_values);
+			if ((*cond_arr[rule->cond_id])(cond_reg_values,
+ imm_values)) {
+ offset += qed_idle_chk_dump_failure(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ rule->rule_id,
+ rule,
+ entry_id,
+ cond_reg_values);
(*num_failing_rules)++;
break;
}
@@ -4028,8 +4343,8 @@ qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
static u32 qed_idle_chk_dump(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
{
- u32 offset = 0, input_offset = 0, num_failing_rules = 0;
- u32 num_failing_rules_offset;
+ u32 num_failing_rules_offset, offset = 0, input_offset = 0;
+ u32 num_failing_rules = 0;
/* Dump global params */
offset += qed_dump_common_global_params(p_hwfn,
@@ -4042,29 +4357,29 @@ static u32 qed_idle_chk_dump(struct qed_hwfn *p_hwfn,
offset += qed_dump_section_hdr(dump_buf + offset, dump, "idle_chk", 1);
num_failing_rules_offset = offset;
offset += qed_dump_num_param(dump_buf + offset, dump, "num_rules", 0);
+
while (input_offset <
s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].size_in_dwords) {
const struct dbg_idle_chk_cond_hdr *cond_hdr =
(const struct dbg_idle_chk_cond_hdr *)
&s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr
[input_offset++];
- bool eval_mode = GET_FIELD(cond_hdr->mode.data,
- DBG_MODE_HDR_EVAL_MODE) > 0;
- bool mode_match = true;
+ bool eval_mode, mode_match = true;
+ u32 curr_failing_rules;
+ u16 modes_buf_offset;
/* Check mode */
+ eval_mode = GET_FIELD(cond_hdr->mode.data,
+ DBG_MODE_HDR_EVAL_MODE) > 0;
if (eval_mode) {
- u16 modes_buf_offset =
+ modes_buf_offset =
GET_FIELD(cond_hdr->mode.data,
DBG_MODE_HDR_MODES_BUF_OFFSET);
-
mode_match = qed_is_mode_match(p_hwfn,
&modes_buf_offset);
}
if (mode_match) {
- u32 curr_failing_rules;
-
offset +=
qed_idle_chk_dump_rule_entries(p_hwfn,
p_ptt,
@@ -4086,10 +4401,13 @@ static u32 qed_idle_chk_dump(struct qed_hwfn *p_hwfn,
qed_dump_num_param(dump_buf + num_failing_rules_offset,
dump, "num_rules", num_failing_rules);
+ /* Dump last section */
+ offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump);
+
return offset;
}
-/* Finds the meta data image in NVRAM. */
+/* Finds the meta data image in NVRAM */
static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 image_type,
@@ -4098,16 +4416,16 @@ static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn,
{
u32 ret_mcp_resp, ret_mcp_param, ret_txn_size;
struct mcp_file_att file_att;
+ int nvm_result;
/* Call NVRAM get file command */
- int nvm_result = qed_mcp_nvm_rd_cmd(p_hwfn,
- p_ptt,
- DRV_MSG_CODE_NVM_GET_FILE_ATT,
- image_type,
- &ret_mcp_resp,
- &ret_mcp_param,
- &ret_txn_size,
- (u32 *)&file_att);
+ nvm_result = qed_mcp_nvm_rd_cmd(p_hwfn,
+ p_ptt,
+ DRV_MSG_CODE_NVM_GET_FILE_ATT,
+ image_type,
+ &ret_mcp_resp,
+ &ret_mcp_param,
+ &ret_txn_size, (u32 *)&file_att);
/* Check response */
if (nvm_result ||
@@ -4117,6 +4435,7 @@ static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn,
/* Update return values */
*nvram_offset_bytes = file_att.nvm_start_addr;
*nvram_size_bytes = file_att.len;
+
DP_VERBOSE(p_hwfn,
QED_MSG_DEBUG,
"find_nvram_image: found NVRAM image of type %d in NVRAM offset %d bytes with size %d bytes\n",
@@ -4125,22 +4444,25 @@ static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn,
/* Check alignment */
if (*nvram_size_bytes & 0x3)
return DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE;
+
return DBG_STATUS_OK;
}
+/* Reads data from NVRAM */
static enum dbg_status qed_nvram_read(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 nvram_offset_bytes,
u32 nvram_size_bytes, u32 *ret_buf)
{
- u32 ret_mcp_resp, ret_mcp_param, ret_read_size;
- u32 bytes_to_copy, read_offset = 0;
+ u32 ret_mcp_resp, ret_mcp_param, ret_read_size, bytes_to_copy;
s32 bytes_left = nvram_size_bytes;
+ u32 read_offset = 0;
DP_VERBOSE(p_hwfn,
QED_MSG_DEBUG,
"nvram_read: reading image of size %d bytes from NVRAM\n",
nvram_size_bytes);
+
do {
bytes_to_copy =
(bytes_left >
@@ -4155,8 +4477,7 @@ static enum dbg_status qed_nvram_read(struct qed_hwfn *p_hwfn,
DRV_MB_PARAM_NVM_LEN_SHIFT),
&ret_mcp_resp, &ret_mcp_param,
&ret_read_size,
- (u32 *)((u8 *)ret_buf +
- read_offset)) != 0)
+ (u32 *)((u8 *)ret_buf + read_offset)))
return DBG_STATUS_NVRAM_READ_FAILED;
/* Check response */
@@ -4172,24 +4493,20 @@ static enum dbg_status qed_nvram_read(struct qed_hwfn *p_hwfn,
}
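
qed_nvram_read above pulls the image in bounded chunks, clamping each MCP read to the maximum NVM buffer length. The chunking logic on its own; the chunk size and read callback are illustrative stand-ins, not the driver's:

	#include <stdint.h>

	#define NVM_CHUNK_BYTES 32	/* stand-in for MCP_DRV_NVM_BUF_LEN */

	int nvm_read_chunk(uint32_t offset, uint32_t len, uint8_t *dst);

	static int nvram_read(uint32_t nvram_offset, uint32_t size,
			      uint8_t *buf)
	{
		int32_t bytes_left = size;
		uint32_t read_offset = 0, chunk;

		do {
			chunk = bytes_left > NVM_CHUNK_BYTES ?
				NVM_CHUNK_BYTES : (uint32_t)bytes_left;

			if (nvm_read_chunk(nvram_offset + read_offset,
					   chunk, buf + read_offset))
				return -1; /* NVRAM_READ_FAILED */

			bytes_left -= chunk;
			read_offset += chunk;
		} while (bytes_left > 0);

		return 0;
	}
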
/* Get info on the MCP Trace data in the scratchpad:
- * - trace_data_grc_addr - the GRC address of the trace data
- * - trace_data_size_bytes - the size in bytes of the MCP Trace data (without
- * the header)
+ * - trace_data_grc_addr (OUT): trace data GRC address in bytes
+ * - trace_data_size (OUT): trace data size in bytes (without the header)
*/
static enum dbg_status qed_mcp_trace_get_data_info(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *trace_data_grc_addr,
- u32 *trace_data_size_bytes)
+ u32 *trace_data_size)
{
- /* Read MCP trace section offsize structure from MCP scratchpad */
- u32 spad_trace_offsize = qed_rd(p_hwfn,
- p_ptt,
- MCP_SPAD_TRACE_OFFSIZE_ADDR);
- u32 signature;
+ u32 spad_trace_offsize, signature;
- /* Extract MCP trace section GRC address from offsize structure (within
- * scratchpad).
- */
+ /* Read trace section offsize structure from MCP scratchpad */
+ spad_trace_offsize = qed_rd(p_hwfn, p_ptt, MCP_SPAD_TRACE_OFFSIZE_ADDR);
+
+ /* Extract trace section address from offsize (in scratchpad) */
*trace_data_grc_addr =
MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize);
@@ -4197,42 +4514,41 @@ static enum dbg_status qed_mcp_trace_get_data_info(struct qed_hwfn *p_hwfn,
signature = qed_rd(p_hwfn, p_ptt,
*trace_data_grc_addr +
offsetof(struct mcp_trace, signature));
+
if (signature != MFW_TRACE_SIGNATURE)
return DBG_STATUS_INVALID_TRACE_SIGNATURE;
/* Read trace size from MCP trace section */
- *trace_data_size_bytes = qed_rd(p_hwfn,
- p_ptt,
- *trace_data_grc_addr +
- offsetof(struct mcp_trace, size));
+ *trace_data_size = qed_rd(p_hwfn,
+ p_ptt,
+ *trace_data_grc_addr +
+ offsetof(struct mcp_trace, size));
+
return DBG_STATUS_OK;
}
-/* Reads MCP trace meta data image from NVRAM.
- * - running_bundle_id (OUT) - the running bundle ID (invalid when loaded from
- * file)
- * - trace_meta_offset_bytes (OUT) - the NVRAM offset in bytes in which the MCP
- * Trace meta data starts (invalid when loaded from file)
- * - trace_meta_size_bytes (OUT) - the size in bytes of the MCP Trace meta data
+/* Reads MCP trace meta data image from NVRAM
+ * - running_bundle_id (OUT): running bundle ID (invalid when loaded from file)
+ * - trace_meta_offset (OUT): trace meta offset in NVRAM in bytes (invalid when
+ * loaded from file).
+ * - trace_meta_size (OUT): size in bytes of the trace meta data.
*/
static enum dbg_status qed_mcp_trace_get_meta_info(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 trace_data_size_bytes,
u32 *running_bundle_id,
- u32 *trace_meta_offset_bytes,
- u32 *trace_meta_size_bytes)
+ u32 *trace_meta_offset,
+ u32 *trace_meta_size)
{
+ u32 spad_trace_offsize, nvram_image_type, running_mfw_addr;
+
/* Read MCP trace section offsize structure from MCP scratchpad */
- u32 spad_trace_offsize = qed_rd(p_hwfn,
- p_ptt,
- MCP_SPAD_TRACE_OFFSIZE_ADDR);
+ spad_trace_offsize = qed_rd(p_hwfn, p_ptt, MCP_SPAD_TRACE_OFFSIZE_ADDR);
/* Find running bundle ID */
- u32 running_mfw_addr =
+ running_mfw_addr =
MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize) +
QED_SECTION_SIZE(spad_trace_offsize) + trace_data_size_bytes;
- u32 nvram_image_type;
-
*running_bundle_id = qed_rd(p_hwfn, p_ptt, running_mfw_addr);
if (*running_bundle_id > 1)
return DBG_STATUS_INVALID_NVRAM_BUNDLE;
@@ -4241,40 +4557,33 @@ static enum dbg_status qed_mcp_trace_get_meta_info(struct qed_hwfn *p_hwfn,
nvram_image_type =
(*running_bundle_id ==
DIR_ID_1) ? NVM_TYPE_MFW_TRACE1 : NVM_TYPE_MFW_TRACE2;
-
return qed_find_nvram_image(p_hwfn,
p_ptt,
nvram_image_type,
- trace_meta_offset_bytes,
- trace_meta_size_bytes);
+ trace_meta_offset, trace_meta_size);
}
-/* Reads the MCP Trace meta data (from NVRAM or buffer) into the specified
- * buffer.
- */
+/* Reads the MCP Trace meta data from NVRAM into the specified buffer */
static enum dbg_status qed_mcp_trace_read_meta(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 nvram_offset_in_bytes,
u32 size_in_bytes, u32 *buf)
{
- u8 *byte_buf = (u8 *)buf;
- u8 modules_num, i;
+ u8 modules_num, module_len, i, *byte_buf = (u8 *)buf;
+ enum dbg_status status;
u32 signature;
/* Read meta data from NVRAM */
- enum dbg_status status = qed_nvram_read(p_hwfn,
- p_ptt,
- nvram_offset_in_bytes,
- size_in_bytes,
- buf);
-
+ status = qed_nvram_read(p_hwfn,
+ p_ptt,
+ nvram_offset_in_bytes, size_in_bytes, buf);
if (status != DBG_STATUS_OK)
return status;
/* Extract and check first signature */
signature = qed_read_unaligned_dword(byte_buf);
- byte_buf += sizeof(u32);
- if (signature != MCP_TRACE_META_IMAGE_SIGNATURE)
+ byte_buf += sizeof(signature);
+ if (signature != NVM_MAGIC_VALUE)
return DBG_STATUS_INVALID_TRACE_SIGNATURE;
/* Extract number of modules */
@@ -4282,16 +4591,16 @@ static enum dbg_status qed_mcp_trace_read_meta(struct qed_hwfn *p_hwfn,
/* Skip all modules */
for (i = 0; i < modules_num; i++) {
- u8 module_len = *(byte_buf++);
-
+ module_len = *(byte_buf++);
byte_buf += module_len;
}
/* Extract and check second signature */
signature = qed_read_unaligned_dword(byte_buf);
- byte_buf += sizeof(u32);
- if (signature != MCP_TRACE_META_IMAGE_SIGNATURE)
+ byte_buf += sizeof(signature);
+ if (signature != NVM_MAGIC_VALUE)
return DBG_STATUS_INVALID_TRACE_SIGNATURE;
+
return DBG_STATUS_OK;
}
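
The meta image validated above is laid out as a leading signature dword, a module count byte, that many length-prefixed module-name strings, and a trailing signature dword. A toy bounds-checked walker of the same layout; the magic constant is an assumption standing in for NVM_MAGIC_VALUE:

	#include <stdint.h>
	#include <stdbool.h>
	#include <stddef.h>
	#include <string.h>

	#define META_MAGIC 0x669955aa	/* assumed stand-in value */

	static bool meta_image_ok(const uint8_t *buf, size_t len)
	{
		const uint8_t *p = buf, *end = buf + len;
		uint32_t sig;
		uint8_t n, i;

		if (end - p < (ptrdiff_t)(sizeof(sig) + 1))
			return false;

		memcpy(&sig, p, sizeof(sig));	/* unaligned-safe read */
		p += sizeof(sig);
		if (sig != META_MAGIC)
			return false;

		n = *p++;			/* number of modules */
		for (i = 0; i < n; i++) {
			if (p >= end)
				return false;
			p += 1 + *p;	/* skip length-prefixed name */
		}

		if (end - p < (ptrdiff_t)sizeof(sig))
			return false;

		memcpy(&sig, p, sizeof(sig));
		return sig == META_MAGIC;
	}
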
@@ -4308,10 +4617,10 @@ static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
bool mcp_access;
int halted = 0;
- mcp_access = !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP);
-
*num_dumped_dwords = 0;
+ mcp_access = !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP);
+
/* Get trace data info */
status = qed_mcp_trace_get_data_info(p_hwfn,
p_ptt,
@@ -4328,7 +4637,7 @@ static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
dump, "dump-type", "mcp-trace");
/* Halt MCP while reading from scratchpad so the read data will be
- * consistent if halt fails, MCP trace is taken anyway, with a small
+	 * consistent. If halt fails, MCP trace is taken anyway, with a small
* risk that it may be corrupt.
*/
if (dump && mcp_access) {
@@ -4339,8 +4648,8 @@ static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
/* Find trace data size */
trace_data_size_dwords =
- DIV_ROUND_UP(trace_data_size_bytes + sizeof(struct mcp_trace),
- BYTES_IN_DWORD);
+ DIV_ROUND_UP(trace_data_size_bytes + sizeof(struct mcp_trace),
+ BYTES_IN_DWORD);
/* Dump trace data section header and param */
offset += qed_dump_section_hdr(dump_buf + offset,
@@ -4354,17 +4663,17 @@ static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
dump_buf + offset,
dump,
BYTES_TO_DWORDS(trace_data_grc_addr),
- trace_data_size_dwords);
+ trace_data_size_dwords, false);
/* Resume MCP (only if halt succeeded) */
- if (halted && qed_mcp_resume(p_hwfn, p_ptt) != 0)
+ if (halted && qed_mcp_resume(p_hwfn, p_ptt))
DP_NOTICE(p_hwfn, "Failed to resume MCP after halt!\n");
/* Dump trace meta section header */
offset += qed_dump_section_hdr(dump_buf + offset,
dump, "mcp_trace_meta", 1);
- /* Read trace meta info */
+ /* Read trace meta info (trace_meta_size_bytes is dword-aligned) */
if (mcp_access) {
status = qed_mcp_trace_get_meta_info(p_hwfn,
p_ptt,
@@ -4391,6 +4700,9 @@ static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
if (status == DBG_STATUS_OK)
offset += trace_meta_size_dwords;
+ /* Dump last section */
+ offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump);
+
*num_dumped_dwords = offset;
/* If no mcp access, indicate that the dump doesn't contain the meta
@@ -4405,7 +4717,7 @@ static enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn,
u32 *dump_buf,
bool dump, u32 *num_dumped_dwords)
{
- u32 offset = 0, dwords_read, size_param_offset;
+ u32 dwords_read, size_param_offset, offset = 0;
bool fifo_has_data;
*num_dumped_dwords = 0;
@@ -4417,8 +4729,8 @@ static enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn,
offset += qed_dump_str_param(dump_buf + offset,
dump, "dump-type", "reg-fifo");
- /* Dump fifo data section header and param. The size param is 0 for now,
- * and is overwritten after reading the FIFO.
+ /* Dump fifo data section header and param. The size param is 0 for
+ * now, and is overwritten after reading the FIFO.
*/
offset += qed_dump_section_hdr(dump_buf + offset,
dump, "reg_fifo_data", 1);
@@ -4430,8 +4742,7 @@ static enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn,
* test how much data is available, except for reading it.
*/
offset += REG_FIFO_DEPTH_DWORDS;
- *num_dumped_dwords = offset;
- return DBG_STATUS_OK;
+ goto out;
}
fifo_has_data = qed_rd(p_hwfn, p_ptt,
@@ -4456,8 +4767,12 @@ static enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn,
qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
dwords_read);
+out:
+ /* Dump last section */
+ offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump);
*num_dumped_dwords = offset;
+
return DBG_STATUS_OK;
}
@@ -4467,7 +4782,7 @@ static enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn,
u32 *dump_buf,
bool dump, u32 *num_dumped_dwords)
{
- u32 offset = 0, dwords_read, size_param_offset;
+ u32 dwords_read, size_param_offset, offset = 0;
bool fifo_has_data;
*num_dumped_dwords = 0;
@@ -4479,8 +4794,8 @@ static enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn,
offset += qed_dump_str_param(dump_buf + offset,
dump, "dump-type", "igu-fifo");
- /* Dump fifo data section header and param. The size param is 0 for now,
- * and is overwritten after reading the FIFO.
+ /* Dump fifo data section header and param. The size param is 0 for
+ * now, and is overwritten after reading the FIFO.
*/
offset += qed_dump_section_hdr(dump_buf + offset,
dump, "igu_fifo_data", 1);
@@ -4492,8 +4807,7 @@ static enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn,
* test how much data is available, except for reading it.
*/
offset += IGU_FIFO_DEPTH_DWORDS;
- *num_dumped_dwords = offset;
- return DBG_STATUS_OK;
+ goto out;
}
fifo_has_data = qed_rd(p_hwfn, p_ptt,
@@ -4519,8 +4833,12 @@ static enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn,
qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
dwords_read);
+out:
+ /* Dump last section */
+ offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump);
*num_dumped_dwords = offset;
+
return DBG_STATUS_OK;
}
@@ -4531,7 +4849,7 @@ static enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn,
bool dump,
u32 *num_dumped_dwords)
{
- u32 offset = 0, size_param_offset, override_window_dwords;
+ u32 size_param_offset, override_window_dwords, offset = 0;
*num_dumped_dwords = 0;
@@ -4542,8 +4860,8 @@ static enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn,
offset += qed_dump_str_param(dump_buf + offset,
dump, "dump-type", "protection-override");
- /* Dump data section header and param. The size param is 0 for now, and
- * is overwritten after reading the data.
+ /* Dump data section header and param. The size param is 0 for now,
+ * and is overwritten after reading the data.
*/
offset += qed_dump_section_hdr(dump_buf + offset,
dump, "protection_override_data", 1);
@@ -4552,8 +4870,7 @@ static enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn,
if (!dump) {
offset += PROTECTION_OVERRIDE_DEPTH_DWORDS;
- *num_dumped_dwords = offset;
- return DBG_STATUS_OK;
+ goto out;
}
/* Add override window info to buffer */
@@ -4569,8 +4886,12 @@ static enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn,
offset += override_window_dwords;
qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
override_window_dwords);
+out:
+ /* Dump last section */
+ offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump);
*num_dumped_dwords = offset;
+
return DBG_STATUS_OK;
}
@@ -4593,11 +4914,14 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
dump_buf + offset, dump, 1);
offset += qed_dump_str_param(dump_buf + offset,
dump, "dump-type", "fw-asserts");
+
+ /* Find Storm dump size */
for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
u32 fw_asserts_section_addr, next_list_idx_addr, next_list_idx;
+ struct storm_defs *storm = &s_storm_defs[storm_id];
u32 last_list_idx, addr;
- if (dev_data->block_in_reset[s_storm_defs[storm_id].block_id])
+ if (dev_data->block_in_reset[storm->block_id])
continue;
/* Read FW info for the current Storm */
@@ -4606,26 +4930,26 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
asserts = &fw_info.fw_asserts_section;
/* Dump FW Asserts section header and params */
- storm_letter_str[0] = s_storm_defs[storm_id].letter;
- offset += qed_dump_section_hdr(dump_buf + offset, dump,
- "fw_asserts", 2);
- offset += qed_dump_str_param(dump_buf + offset, dump, "storm",
- storm_letter_str);
- offset += qed_dump_num_param(dump_buf + offset, dump, "size",
+ storm_letter_str[0] = storm->letter;
+ offset += qed_dump_section_hdr(dump_buf + offset,
+ dump, "fw_asserts", 2);
+ offset += qed_dump_str_param(dump_buf + offset,
+ dump, "storm", storm_letter_str);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "size",
asserts->list_element_dword_size);
+ /* Read and dump FW Asserts data */
if (!dump) {
offset += asserts->list_element_dword_size;
continue;
}
- /* Read and dump FW Asserts data */
- fw_asserts_section_addr =
- s_storm_defs[storm_id].sem_fast_mem_addr +
+ fw_asserts_section_addr = storm->sem_fast_mem_addr +
SEM_FAST_REG_INT_RAM +
RAM_LINES_TO_BYTES(asserts->section_ram_line_offset);
- next_list_idx_addr =
- fw_asserts_section_addr +
+ next_list_idx_addr = fw_asserts_section_addr +
DWORDS_TO_BYTES(asserts->list_next_index_dword_offset);
next_list_idx = qed_rd(p_hwfn, p_ptt, next_list_idx_addr);
last_list_idx = (next_list_idx > 0
@@ -4638,11 +4962,13 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
qed_grc_dump_addr_range(p_hwfn, p_ptt,
dump_buf + offset,
dump, addr,
- asserts->list_element_dword_size);
+ asserts->list_element_dword_size,
+ false);
}
/* Dump last section */
- offset += qed_dump_section_hdr(dump_buf + offset, dump, "last", 0);
+ offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump);
+
return offset;
}
@@ -4650,10 +4976,10 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
enum dbg_status qed_dbg_set_bin_ptr(const u8 * const bin_ptr)
{
- /* Convert binary data to debug arrays */
struct bin_buffer_hdr *buf_array = (struct bin_buffer_hdr *)bin_ptr;
u8 buf_id;
+	/* Convert binary data to debug arrays */
for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++) {
s_dbg_arrays[buf_id].ptr =
(u32 *)(bin_ptr + buf_array[buf_id].offset);
@@ -4682,14 +5008,17 @@ enum dbg_status qed_dbg_grc_get_dump_buf_size(struct qed_hwfn *p_hwfn,
enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
*buf_size = 0;
+
if (status != DBG_STATUS_OK)
return status;
+
if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
!s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr ||
!s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr ||
!s_dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr ||
!s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)
return DBG_STATUS_DBG_ARRAY_NOT_SET;
+
return qed_grc_dump(p_hwfn, p_ptt, NULL, false, buf_size);
}
@@ -4702,12 +5031,14 @@ enum dbg_status qed_dbg_grc_dump(struct qed_hwfn *p_hwfn,
u32 needed_buf_size_in_dwords;
enum dbg_status status;
- status = qed_dbg_grc_get_dump_buf_size(p_hwfn, p_ptt,
- &needed_buf_size_in_dwords);
-
*num_dumped_dwords = 0;
+
+ status = qed_dbg_grc_get_dump_buf_size(p_hwfn,
+ p_ptt,
+ &needed_buf_size_in_dwords);
if (status != DBG_STATUS_OK)
return status;
+
if (buf_size_in_dwords < needed_buf_size_in_dwords)
return DBG_STATUS_DUMP_BUF_TOO_SMALL;
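
All qed_dbg_*_dump() entry points now share this two-pass shape: run the dump routine with dump=false to compute the needed size, reject short buffers, then run it again with dump=true. A generic sketch of the pattern; dump_fn_t is a hypothetical callback type, not a driver API:

	#include <stdint.h>
	#include <stdbool.h>
	#include <stddef.h>

	typedef uint32_t (*dump_fn_t)(uint32_t *buf, bool dump);

	static int sized_dump(dump_fn_t fn, uint32_t *buf,
			      uint32_t buf_dwords, uint32_t *num_dumped)
	{
		uint32_t needed = fn(NULL, false);	/* measure pass */

		*num_dumped = 0;
		if (buf_dwords < needed)
			return -1;	/* DUMP_BUF_TOO_SMALL */

		*num_dumped = fn(buf, true);		/* fill pass */

		return 0;
	}
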
@@ -4724,25 +5055,31 @@ enum dbg_status qed_dbg_idle_chk_get_dump_buf_size(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *buf_size)
{
- enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ struct idle_chk_data *idle_chk;
+ enum dbg_status status;
+ idle_chk = &dev_data->idle_chk;
*buf_size = 0;
+
+ status = qed_dbg_dev_init(p_hwfn, p_ptt);
if (status != DBG_STATUS_OK)
return status;
+
if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
!s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr ||
!s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr ||
!s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr)
return DBG_STATUS_DBG_ARRAY_NOT_SET;
- if (!dev_data->idle_chk.buf_size_set) {
- dev_data->idle_chk.buf_size = qed_idle_chk_dump(p_hwfn,
- p_ptt,
- NULL, false);
- dev_data->idle_chk.buf_size_set = true;
+
+ if (!idle_chk->buf_size_set) {
+ idle_chk->buf_size = qed_idle_chk_dump(p_hwfn,
+ p_ptt, NULL, false);
+ idle_chk->buf_size_set = true;
}
- *buf_size = dev_data->idle_chk.buf_size;
+ *buf_size = idle_chk->buf_size;
+
return DBG_STATUS_OK;
}
@@ -4755,12 +5092,14 @@ enum dbg_status qed_dbg_idle_chk_dump(struct qed_hwfn *p_hwfn,
u32 needed_buf_size_in_dwords;
enum dbg_status status;
- status = qed_dbg_idle_chk_get_dump_buf_size(p_hwfn, p_ptt,
- &needed_buf_size_in_dwords);
-
*num_dumped_dwords = 0;
+
+ status = qed_dbg_idle_chk_get_dump_buf_size(p_hwfn,
+ p_ptt,
+ &needed_buf_size_in_dwords);
if (status != DBG_STATUS_OK)
return status;
+
if (buf_size_in_dwords < needed_buf_size_in_dwords)
return DBG_STATUS_DUMP_BUF_TOO_SMALL;
@@ -4783,8 +5122,10 @@ enum dbg_status qed_dbg_mcp_trace_get_dump_buf_size(struct qed_hwfn *p_hwfn,
enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
*buf_size = 0;
+
if (status != DBG_STATUS_OK)
return status;
+
return qed_mcp_trace_dump(p_hwfn, p_ptt, NULL, false, buf_size);
}
@@ -4797,13 +5138,12 @@ enum dbg_status qed_dbg_mcp_trace_dump(struct qed_hwfn *p_hwfn,
u32 needed_buf_size_in_dwords;
enum dbg_status status;
- /* validate buffer size */
status =
- qed_dbg_mcp_trace_get_dump_buf_size(p_hwfn, p_ptt,
- &needed_buf_size_in_dwords);
-
- if (status != DBG_STATUS_OK &&
- status != DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
+ qed_dbg_mcp_trace_get_dump_buf_size(p_hwfn,
+ p_ptt,
+ &needed_buf_size_in_dwords);
+	if (status != DBG_STATUS_OK &&
+	    status != DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
return status;
if (buf_size_in_dwords < needed_buf_size_in_dwords)
@@ -4829,8 +5169,10 @@ enum dbg_status qed_dbg_reg_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
*buf_size = 0;
+
if (status != DBG_STATUS_OK)
return status;
+
return qed_reg_fifo_dump(p_hwfn, p_ptt, NULL, false, buf_size);
}
@@ -4843,12 +5185,14 @@ enum dbg_status qed_dbg_reg_fifo_dump(struct qed_hwfn *p_hwfn,
u32 needed_buf_size_in_dwords;
enum dbg_status status;
- status = qed_dbg_reg_fifo_get_dump_buf_size(p_hwfn, p_ptt,
- &needed_buf_size_in_dwords);
-
*num_dumped_dwords = 0;
+
+ status = qed_dbg_reg_fifo_get_dump_buf_size(p_hwfn,
+ p_ptt,
+ &needed_buf_size_in_dwords);
if (status != DBG_STATUS_OK)
return status;
+
if (buf_size_in_dwords < needed_buf_size_in_dwords)
return DBG_STATUS_DUMP_BUF_TOO_SMALL;
@@ -4871,8 +5215,10 @@ enum dbg_status qed_dbg_igu_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
*buf_size = 0;
+
if (status != DBG_STATUS_OK)
return status;
+
return qed_igu_fifo_dump(p_hwfn, p_ptt, NULL, false, buf_size);
}
@@ -4885,12 +5231,14 @@ enum dbg_status qed_dbg_igu_fifo_dump(struct qed_hwfn *p_hwfn,
u32 needed_buf_size_in_dwords;
enum dbg_status status;
- status = qed_dbg_igu_fifo_get_dump_buf_size(p_hwfn, p_ptt,
- &needed_buf_size_in_dwords);
-
*num_dumped_dwords = 0;
+
+ status = qed_dbg_igu_fifo_get_dump_buf_size(p_hwfn,
+ p_ptt,
+ &needed_buf_size_in_dwords);
if (status != DBG_STATUS_OK)
return status;
+
if (buf_size_in_dwords < needed_buf_size_in_dwords)
return DBG_STATUS_DUMP_BUF_TOO_SMALL;
@@ -4913,8 +5261,10 @@ qed_dbg_protection_override_get_dump_buf_size(struct qed_hwfn *p_hwfn,
enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
*buf_size = 0;
+
if (status != DBG_STATUS_OK)
return status;
+
return qed_protection_override_dump(p_hwfn,
p_ptt, NULL, false, buf_size);
}
@@ -4925,15 +5275,18 @@ enum dbg_status qed_dbg_protection_override_dump(struct qed_hwfn *p_hwfn,
u32 buf_size_in_dwords,
u32 *num_dumped_dwords)
{
- u32 needed_buf_size_in_dwords;
+ u32 needed_buf_size_in_dwords, *p_size = &needed_buf_size_in_dwords;
enum dbg_status status;
- status = qed_dbg_protection_override_get_dump_buf_size(p_hwfn, p_ptt,
- &needed_buf_size_in_dwords);
-
*num_dumped_dwords = 0;
+
+ status =
+ qed_dbg_protection_override_get_dump_buf_size(p_hwfn,
+ p_ptt,
+ p_size);
if (status != DBG_STATUS_OK)
return status;
+
if (buf_size_in_dwords < needed_buf_size_in_dwords)
return DBG_STATUS_DUMP_BUF_TOO_SMALL;
@@ -4958,12 +5311,15 @@ enum dbg_status qed_dbg_fw_asserts_get_dump_buf_size(struct qed_hwfn *p_hwfn,
enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
*buf_size = 0;
+
if (status != DBG_STATUS_OK)
return status;
/* Update reset state */
qed_update_blocks_reset_state(p_hwfn, p_ptt);
+
*buf_size = qed_fw_asserts_dump(p_hwfn, p_ptt, NULL, false);
+
return DBG_STATUS_OK;
}
@@ -4973,24 +5329,108 @@ enum dbg_status qed_dbg_fw_asserts_dump(struct qed_hwfn *p_hwfn,
u32 buf_size_in_dwords,
u32 *num_dumped_dwords)
{
- u32 needed_buf_size_in_dwords;
+ u32 needed_buf_size_in_dwords, *p_size = &needed_buf_size_in_dwords;
enum dbg_status status;
- status = qed_dbg_fw_asserts_get_dump_buf_size(p_hwfn, p_ptt,
- &needed_buf_size_in_dwords);
-
*num_dumped_dwords = 0;
+
+ status =
+ qed_dbg_fw_asserts_get_dump_buf_size(p_hwfn,
+ p_ptt,
+ p_size);
if (status != DBG_STATUS_OK)
return status;
+
if (buf_size_in_dwords < needed_buf_size_in_dwords)
return DBG_STATUS_DUMP_BUF_TOO_SMALL;
*num_dumped_dwords = qed_fw_asserts_dump(p_hwfn, p_ptt, dump_buf, true);
+
+ /* Revert GRC params to their default */
+ qed_dbg_grc_set_params_default(p_hwfn);
+
+ return DBG_STATUS_OK;
+}
+
+enum dbg_status qed_dbg_read_attn(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum block_id block_id,
+ enum dbg_attn_type attn_type,
+ bool clear_status,
+ struct dbg_attn_block_result *results)
+{
+ enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
+ u8 reg_idx, num_attn_regs, num_result_regs = 0;
+ const struct dbg_attn_reg *attn_reg_arr;
+
+ if (status != DBG_STATUS_OK)
+ return status;
+
+ if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
+ !s_dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr ||
+ !s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)
+ return DBG_STATUS_DBG_ARRAY_NOT_SET;
+
+ attn_reg_arr = qed_get_block_attn_regs(block_id,
+ attn_type, &num_attn_regs);
+
+ for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
+ const struct dbg_attn_reg *reg_data = &attn_reg_arr[reg_idx];
+ struct dbg_attn_reg_result *reg_result;
+ u32 sts_addr, sts_val;
+ u16 modes_buf_offset;
+ bool eval_mode;
+
+ /* Check mode */
+ eval_mode = GET_FIELD(reg_data->mode.data,
+ DBG_MODE_HDR_EVAL_MODE) > 0;
+ modes_buf_offset = GET_FIELD(reg_data->mode.data,
+ DBG_MODE_HDR_MODES_BUF_OFFSET);
+ if (eval_mode && !qed_is_mode_match(p_hwfn, &modes_buf_offset))
+ continue;
+
+ /* Mode match - read attention status register */
+ sts_addr = DWORDS_TO_BYTES(clear_status ?
+ reg_data->sts_clr_address :
+ GET_FIELD(reg_data->data,
+ DBG_ATTN_REG_STS_ADDRESS));
+ sts_val = qed_rd(p_hwfn, p_ptt, sts_addr);
+ if (!sts_val)
+ continue;
+
+ /* Non-zero attention status - add to results */
+ reg_result = &results->reg_results[num_result_regs];
+ SET_FIELD(reg_result->data,
+ DBG_ATTN_REG_RESULT_STS_ADDRESS, sts_addr);
+ SET_FIELD(reg_result->data,
+ DBG_ATTN_REG_RESULT_NUM_REG_ATTN,
+ GET_FIELD(reg_data->data, DBG_ATTN_REG_NUM_REG_ATTN));
+ reg_result->block_attn_offset = reg_data->block_attn_offset;
+ reg_result->sts_val = sts_val;
+ reg_result->mask_val = qed_rd(p_hwfn,
+ p_ptt,
+ DWORDS_TO_BYTES
+ (reg_data->mask_address));
+ num_result_regs++;
+ }
+
+ results->block_id = (u8)block_id;
+ results->names_offset =
+ qed_get_block_attn_data(block_id, attn_type)->names_offset;
+ SET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_ATTN_TYPE, attn_type);
+ SET_FIELD(results->data,
+ DBG_ATTN_BLOCK_RESULT_NUM_REGS, num_result_regs);
+
return DBG_STATUS_OK;
}
/******************************* Data Types **********************************/
+struct block_info {
+ const char *name;
+ enum block_id id;
+};
+
struct mcp_trace_format {
u32 data;
#define MCP_TRACE_FORMAT_MODULE_MASK 0x0000ffff
@@ -5005,9 +5445,14 @@ struct mcp_trace_format {
#define MCP_TRACE_FORMAT_P3_SIZE_SHIFT 22
#define MCP_TRACE_FORMAT_LEN_MASK 0xff000000
#define MCP_TRACE_FORMAT_LEN_SHIFT 24
+
char *format_str;
};
+/* Meta data structure, generated by a perl script during MFW build;
+ * therefore, the structs mcp_trace_meta and mcp_trace_format are
+ * duplicated in the perl script.
+ */
struct mcp_trace_meta {
u32 modules_num;
char **modules;
@@ -5015,7 +5460,7 @@ struct mcp_trace_meta {
struct mcp_trace_format *formats;
};
-/* Reg fifo element */
+/* REG fifo element */
struct reg_fifo_element {
u64 data;
#define REG_FIFO_ELEMENT_ADDRESS_SHIFT 0
@@ -5140,12 +5585,15 @@ struct igu_fifo_addr_data {
/******************************** Constants **********************************/
#define MAX_MSG_LEN 1024
+
#define MCP_TRACE_MAX_MODULE_LEN 8
#define MCP_TRACE_FORMAT_MAX_PARAMS 3
#define MCP_TRACE_FORMAT_PARAM_WIDTH \
(MCP_TRACE_FORMAT_P2_SIZE_SHIFT - MCP_TRACE_FORMAT_P1_SIZE_SHIFT)
+
#define REG_FIFO_ELEMENT_ADDR_FACTOR 4
#define REG_FIFO_ELEMENT_IS_PF_VF_VAL 127
+
#define PROTECTION_OVERRIDE_ELEMENT_ADDR_FACTOR 4
/********************************* Macros ************************************/
@@ -5154,59 +5602,269 @@ struct igu_fifo_addr_data {
/***************************** Constant Arrays *******************************/
+struct user_dbg_array {
+ const u32 *ptr;
+ u32 size_in_dwords;
+};
+
+/* Debug arrays */
+static struct user_dbg_array
+s_user_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { {NULL} };
+
+/* Block names array */
+static struct block_info s_block_info_arr[] = {
+ {"grc", BLOCK_GRC},
+ {"miscs", BLOCK_MISCS},
+ {"misc", BLOCK_MISC},
+ {"dbu", BLOCK_DBU},
+ {"pglue_b", BLOCK_PGLUE_B},
+ {"cnig", BLOCK_CNIG},
+ {"cpmu", BLOCK_CPMU},
+ {"ncsi", BLOCK_NCSI},
+ {"opte", BLOCK_OPTE},
+ {"bmb", BLOCK_BMB},
+ {"pcie", BLOCK_PCIE},
+ {"mcp", BLOCK_MCP},
+ {"mcp2", BLOCK_MCP2},
+ {"pswhst", BLOCK_PSWHST},
+ {"pswhst2", BLOCK_PSWHST2},
+ {"pswrd", BLOCK_PSWRD},
+ {"pswrd2", BLOCK_PSWRD2},
+ {"pswwr", BLOCK_PSWWR},
+ {"pswwr2", BLOCK_PSWWR2},
+ {"pswrq", BLOCK_PSWRQ},
+ {"pswrq2", BLOCK_PSWRQ2},
+ {"pglcs", BLOCK_PGLCS},
+ {"ptu", BLOCK_PTU},
+ {"dmae", BLOCK_DMAE},
+ {"tcm", BLOCK_TCM},
+ {"mcm", BLOCK_MCM},
+ {"ucm", BLOCK_UCM},
+ {"xcm", BLOCK_XCM},
+ {"ycm", BLOCK_YCM},
+ {"pcm", BLOCK_PCM},
+ {"qm", BLOCK_QM},
+ {"tm", BLOCK_TM},
+ {"dorq", BLOCK_DORQ},
+ {"brb", BLOCK_BRB},
+ {"src", BLOCK_SRC},
+ {"prs", BLOCK_PRS},
+ {"tsdm", BLOCK_TSDM},
+ {"msdm", BLOCK_MSDM},
+ {"usdm", BLOCK_USDM},
+ {"xsdm", BLOCK_XSDM},
+ {"ysdm", BLOCK_YSDM},
+ {"psdm", BLOCK_PSDM},
+ {"tsem", BLOCK_TSEM},
+ {"msem", BLOCK_MSEM},
+ {"usem", BLOCK_USEM},
+ {"xsem", BLOCK_XSEM},
+ {"ysem", BLOCK_YSEM},
+ {"psem", BLOCK_PSEM},
+ {"rss", BLOCK_RSS},
+ {"tmld", BLOCK_TMLD},
+ {"muld", BLOCK_MULD},
+ {"yuld", BLOCK_YULD},
+ {"xyld", BLOCK_XYLD},
+ {"ptld", BLOCK_PTLD},
+ {"ypld", BLOCK_YPLD},
+ {"prm", BLOCK_PRM},
+ {"pbf_pb1", BLOCK_PBF_PB1},
+ {"pbf_pb2", BLOCK_PBF_PB2},
+ {"rpb", BLOCK_RPB},
+ {"btb", BLOCK_BTB},
+ {"pbf", BLOCK_PBF},
+ {"rdif", BLOCK_RDIF},
+ {"tdif", BLOCK_TDIF},
+ {"cdu", BLOCK_CDU},
+ {"ccfc", BLOCK_CCFC},
+ {"tcfc", BLOCK_TCFC},
+ {"igu", BLOCK_IGU},
+ {"cau", BLOCK_CAU},
+ {"rgfs", BLOCK_RGFS},
+ {"rgsrc", BLOCK_RGSRC},
+ {"tgfs", BLOCK_TGFS},
+ {"tgsrc", BLOCK_TGSRC},
+ {"umac", BLOCK_UMAC},
+ {"xmac", BLOCK_XMAC},
+ {"dbg", BLOCK_DBG},
+ {"nig", BLOCK_NIG},
+ {"wol", BLOCK_WOL},
+ {"bmbn", BLOCK_BMBN},
+ {"ipc", BLOCK_IPC},
+ {"nwm", BLOCK_NWM},
+ {"nws", BLOCK_NWS},
+ {"ms", BLOCK_MS},
+ {"phy_pcie", BLOCK_PHY_PCIE},
+ {"led", BLOCK_LED},
+ {"avs_wrap", BLOCK_AVS_WRAP},
+ {"misc_aeu", BLOCK_MISC_AEU},
+ {"bar0_map", BLOCK_BAR0_MAP}
+};
+
/* Status string array */
static const char * const s_status_str[] = {
+ /* DBG_STATUS_OK */
"Operation completed successfully",
+
+ /* DBG_STATUS_APP_VERSION_NOT_SET */
"Debug application version wasn't set",
+
+ /* DBG_STATUS_UNSUPPORTED_APP_VERSION */
"Unsupported debug application version",
+
+ /* DBG_STATUS_DBG_BLOCK_NOT_RESET */
"The debug block wasn't reset since the last recording",
+
+ /* DBG_STATUS_INVALID_ARGS */
"Invalid arguments",
+
+ /* DBG_STATUS_OUTPUT_ALREADY_SET */
"The debug output was already set",
+
+ /* DBG_STATUS_INVALID_PCI_BUF_SIZE */
"Invalid PCI buffer size",
+
+ /* DBG_STATUS_PCI_BUF_ALLOC_FAILED */
"PCI buffer allocation failed",
+
+ /* DBG_STATUS_PCI_BUF_NOT_ALLOCATED */
"A PCI buffer wasn't allocated",
+
+ /* DBG_STATUS_TOO_MANY_INPUTS */
"Too many inputs were enabled. Enabled less inputs, or set 'unifyInputs' to true",
- "GRC/Timestamp input overlap in cycle dword 0",
+
+ /* DBG_STATUS_INPUT_OVERLAP */
+ "Overlapping debug bus inputs",
+
+ /* DBG_STATUS_HW_ONLY_RECORDING */
"Cannot record Storm data since the entire recording cycle is used by HW",
+
+ /* DBG_STATUS_STORM_ALREADY_ENABLED */
"The Storm was already enabled",
+
+ /* DBG_STATUS_STORM_NOT_ENABLED */
"The specified Storm wasn't enabled",
+
+ /* DBG_STATUS_BLOCK_ALREADY_ENABLED */
"The block was already enabled",
+
+ /* DBG_STATUS_BLOCK_NOT_ENABLED */
"The specified block wasn't enabled",
+
+ /* DBG_STATUS_NO_INPUT_ENABLED */
"No input was enabled for recording",
+
+ /* DBG_STATUS_NO_FILTER_TRIGGER_64B */
"Filters and triggers are not allowed when recording in 64b units",
+
+ /* DBG_STATUS_FILTER_ALREADY_ENABLED */
"The filter was already enabled",
+
+ /* DBG_STATUS_TRIGGER_ALREADY_ENABLED */
"The trigger was already enabled",
+
+ /* DBG_STATUS_TRIGGER_NOT_ENABLED */
"The trigger wasn't enabled",
+
+ /* DBG_STATUS_CANT_ADD_CONSTRAINT */
"A constraint can be added only after a filter was enabled or a trigger state was added",
+
+ /* DBG_STATUS_TOO_MANY_TRIGGER_STATES */
"Cannot add more than 3 trigger states",
+
+ /* DBG_STATUS_TOO_MANY_CONSTRAINTS */
"Cannot add more than 4 constraints per filter or trigger state",
+
+ /* DBG_STATUS_RECORDING_NOT_STARTED */
"The recording wasn't started",
+
+ /* DBG_STATUS_DATA_DIDNT_TRIGGER */
"A trigger was configured, but it didn't trigger",
+
+ /* DBG_STATUS_NO_DATA_RECORDED */
"No data was recorded",
+
+ /* DBG_STATUS_DUMP_BUF_TOO_SMALL */
"Dump buffer is too small",
+
+ /* DBG_STATUS_DUMP_NOT_CHUNK_ALIGNED */
"Dumped data is not aligned to chunks",
+
+ /* DBG_STATUS_UNKNOWN_CHIP */
"Unknown chip",
+
+ /* DBG_STATUS_VIRT_MEM_ALLOC_FAILED */
"Failed allocating virtual memory",
+
+ /* DBG_STATUS_BLOCK_IN_RESET */
"The input block is in reset",
+
+ /* DBG_STATUS_INVALID_TRACE_SIGNATURE */
"Invalid MCP trace signature found in NVRAM",
+
+ /* DBG_STATUS_INVALID_NVRAM_BUNDLE */
"Invalid bundle ID found in NVRAM",
+
+ /* DBG_STATUS_NVRAM_GET_IMAGE_FAILED */
"Failed getting NVRAM image",
+
+ /* DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE */
"NVRAM image is not dword-aligned",
+
+ /* DBG_STATUS_NVRAM_READ_FAILED */
"Failed reading from NVRAM",
+
+ /* DBG_STATUS_IDLE_CHK_PARSE_FAILED */
"Idle check parsing failed",
+
+ /* DBG_STATUS_MCP_TRACE_BAD_DATA */
"MCP Trace data is corrupt",
- "Dump doesn't contain meta data - it must be provided in an image file",
+
+ /* DBG_STATUS_MCP_TRACE_NO_META */
+ "Dump doesn't contain meta data - it must be provided in image file",
+
+ /* DBG_STATUS_MCP_COULD_NOT_HALT */
"Failed to halt MCP",
+
+ /* DBG_STATUS_MCP_COULD_NOT_RESUME */
"Failed to resume MCP after halt",
+
+ /* DBG_STATUS_DMAE_FAILED */
"DMAE transaction failed",
+
+ /* DBG_STATUS_SEMI_FIFO_NOT_EMPTY */
"Failed to empty SEMI sync FIFO",
+
+ /* DBG_STATUS_IGU_FIFO_BAD_DATA */
"IGU FIFO data is corrupt",
+
+ /* DBG_STATUS_MCP_COULD_NOT_MASK_PRTY */
"MCP failed to mask parities",
+
+ /* DBG_STATUS_FW_ASSERTS_PARSE_FAILED */
"FW Asserts parsing failed",
+
+ /* DBG_STATUS_REG_FIFO_BAD_DATA */
"GRC FIFO data is corrupt",
+
+ /* DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA */
"Protection Override data is corrupt",
+
+ /* DBG_STATUS_DBG_ARRAY_NOT_SET */
"Debug arrays were not set (when using binary files, dbg_set_bin_ptr must be called)",
- "When a block is filtered, no other blocks can be recorded unless inputs are unified (due to a HW bug)"
+
+ /* DBG_STATUS_FILTER_BUG */
+ "Debug Bus filtering requires the -unifyInputs option (due to a HW bug)",
+
+ /* DBG_STATUS_NON_MATCHING_LINES */
+ "Non-matching debug lines - all lines must be of the same type (either 128b or 256b)",
+
+ /* DBG_STATUS_INVALID_TRIGGER_DWORD_OFFSET */
+ "The selected trigger dword offset wasn't enabled in the recorded HW block",
+
+ /* DBG_STATUS_DBG_BUS_IN_USE */
+ "The debug bus is in use"
};
/* Idle check severity names array */
@@ -5223,12 +5881,13 @@ static const char * const s_mcp_trace_level_str[] = {
"DEBUG"
};
-/* Parsing strings */
+/* Access type names array */
static const char * const s_access_strs[] = {
"read",
"write"
};
+/* Privilege type names array */
static const char * const s_privilege_strs[] = {
"VF",
"PDA",
@@ -5236,6 +5895,7 @@ static const char * const s_privilege_strs[] = {
"UA"
};
+/* Protection type names array */
static const char * const s_protection_strs[] = {
"(default)",
"(default)",
@@ -5247,6 +5907,7 @@ static const char * const s_protection_strs[] = {
"override UA"
};
+/* Master type names array */
static const char * const s_master_strs[] = {
"???",
"pxp",
@@ -5266,6 +5927,7 @@ static const char * const s_master_strs[] = {
"???"
};
+/* REG FIFO error messages array */
static const char * const s_reg_fifo_error_strs[] = {
"grc timeout",
"address doesn't belong to any block",
@@ -5274,6 +5936,7 @@ static const char * const s_reg_fifo_error_strs[] = {
"path isolation error"
};
+/* IGU FIFO sources array */
static const char * const s_igu_fifo_source_strs[] = {
"TSTORM",
"MSTORM",
@@ -5288,6 +5951,7 @@ static const char * const s_igu_fifo_source_strs[] = {
"GRC",
};
+/* IGU FIFO error messages */
static const char * const s_igu_fifo_error_strs[] = {
"no error",
"length error",
@@ -5308,13 +5972,18 @@ static const char * const s_igu_fifo_error_strs[] = {
/* IGU FIFO address data */
static const struct igu_fifo_addr_data s_igu_fifo_addr_data[] = {
- {0x0, 0x101, "MSI-X Memory", NULL, IGU_ADDR_TYPE_MSIX_MEM},
- {0x102, 0x1ff, "reserved", NULL, IGU_ADDR_TYPE_RESERVED},
- {0x200, 0x200, "Write PBA[0:63]", NULL, IGU_ADDR_TYPE_WRITE_PBA},
+ {0x0, 0x101, "MSI-X Memory", NULL,
+ IGU_ADDR_TYPE_MSIX_MEM},
+ {0x102, 0x1ff, "reserved", NULL,
+ IGU_ADDR_TYPE_RESERVED},
+ {0x200, 0x200, "Write PBA[0:63]", NULL,
+ IGU_ADDR_TYPE_WRITE_PBA},
{0x201, 0x201, "Write PBA[64:127]", "reserved",
IGU_ADDR_TYPE_WRITE_PBA},
- {0x202, 0x202, "Write PBA[128]", "reserved", IGU_ADDR_TYPE_WRITE_PBA},
- {0x203, 0x3ff, "reserved", NULL, IGU_ADDR_TYPE_RESERVED},
+ {0x202, 0x202, "Write PBA[128]", "reserved",
+ IGU_ADDR_TYPE_WRITE_PBA},
+ {0x203, 0x3ff, "reserved", NULL,
+ IGU_ADDR_TYPE_RESERVED},
{0x400, 0x5ef, "Write interrupt acknowledgment", NULL,
IGU_ADDR_TYPE_WRITE_INT_ACK},
{0x5f0, 0x5f0, "Attention bits update", NULL,
@@ -5331,8 +6000,10 @@ static const struct igu_fifo_addr_data s_igu_fifo_addr_data[] = {
IGU_ADDR_TYPE_READ_INT},
{0x5f6, 0x5f6, "Read interrupt 0:63 without mask", NULL,
IGU_ADDR_TYPE_READ_INT},
- {0x5f7, 0x5ff, "reserved", NULL, IGU_ADDR_TYPE_RESERVED},
- {0x600, 0x7ff, "Producer update", NULL, IGU_ADDR_TYPE_WRITE_PROD_UPDATE}
+ {0x5f7, 0x5ff, "reserved", NULL,
+ IGU_ADDR_TYPE_RESERVED},
+ {0x600, 0x7ff, "Producer update", NULL,
+ IGU_ADDR_TYPE_WRITE_PROD_UPDATE}
};
/******************************** Variables **********************************/
@@ -5340,28 +6011,12 @@ static const struct igu_fifo_addr_data s_igu_fifo_addr_data[] = {
/* MCP Trace meta data - used in case the dump doesn't contain the meta data
* (e.g. due to no NVRAM access).
*/
-static struct dbg_array s_mcp_trace_meta = { NULL, 0 };
+static struct user_dbg_array s_mcp_trace_meta = { NULL, 0 };
/* Temporary buffer, used for print size calculations */
static char s_temp_buf[MAX_MSG_LEN];
-/***************************** Public Functions *******************************/
-
-enum dbg_status qed_dbg_user_set_bin_ptr(const u8 * const bin_ptr)
-{
- /* Convert binary data to debug arrays */
- struct bin_buffer_hdr *buf_array = (struct bin_buffer_hdr *)bin_ptr;
- u8 buf_id;
-
- for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++) {
- s_dbg_arrays[buf_id].ptr =
- (u32 *)(bin_ptr + buf_array[buf_id].offset);
- s_dbg_arrays[buf_id].size_in_dwords =
- BYTES_TO_DWORDS(buf_array[buf_id].length);
- }
-
- return DBG_STATUS_OK;
-}
+/**************************** Private Functions ******************************/
static u32 qed_cyclic_add(u32 a, u32 b, u32 size)
{
@@ -5381,10 +6036,8 @@ static u32 qed_read_from_cyclic_buf(void *buf,
u32 *offset,
u32 buf_size, u8 num_bytes_to_read)
{
- u8 *bytes_buf = (u8 *)buf;
- u8 *val_ptr;
+ u8 i, *val_ptr, *bytes_buf = (u8 *)buf;
u32 val = 0;
- u8 i;
val_ptr = (u8 *)&val;
@@ -5412,6 +6065,7 @@ static u32 qed_read_dword_from_buf(void *buf, u32 *offset)
u32 dword_val = *(u32 *)&((u8 *)buf)[*offset];
*offset += 4;
+
return dword_val;
}
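The bodies of qed_cyclic_add() and qed_cyclic_sub() (the latter is used further down when computing bytes_left) are elided by the hunks above. As an illustration only — not the patched bodies — the wrap-around arithmetic they are assumed to implement looks like:

	static u32 example_cyclic_add(u32 a, u32 b, u32 size)
	{
		return (a + b) % size;	/* advance an offset with wrap */
	}

	static u32 example_cyclic_sub(u32 a, u32 b, u32 size)
	{
		/* distance from b to a on a circular buffer of 'size' */
		return a >= b ? a - b : size - (b - a);
	}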
@@ -5445,7 +6099,7 @@ static u32 qed_read_param(u32 *dump_buf,
const char **param_str_val, u32 *param_num_val)
{
char *char_buf = (char *)dump_buf;
- u32 offset = 0; /* In bytes */
+ size_t offset = 0;
/* Extract param name */
*param_name = char_buf;
@@ -5493,37 +6147,31 @@ static u32 qed_print_section_params(u32 *dump_buf,
u32 i, dump_offset = 0, results_offset = 0;
for (i = 0; i < num_section_params; i++) {
- const char *param_name;
- const char *param_str_val;
+ const char *param_name, *param_str_val;
u32 param_num_val = 0;
dump_offset += qed_read_param(dump_buf + dump_offset,
&param_name,
&param_str_val, &param_num_val);
+
if (param_str_val)
- /* String param */
results_offset +=
sprintf(qed_get_buf_ptr(results_buf,
results_offset),
"%s: %s\n", param_name, param_str_val);
else if (strcmp(param_name, "fw-timestamp"))
- /* Numeric param */
results_offset +=
sprintf(qed_get_buf_ptr(results_buf,
results_offset),
"%s: %d\n", param_name, param_num_val);
}
- results_offset +=
- sprintf(qed_get_buf_ptr(results_buf, results_offset), "\n");
+ results_offset += sprintf(qed_get_buf_ptr(results_buf, results_offset),
+ "\n");
+
*num_chars_printed = results_offset;
- return dump_offset;
-}
-const char *qed_dbg_get_status_str(enum dbg_status status)
-{
- return (status <
- MAX_DBG_STATUS) ? s_status_str[status] : "Invalid debug status";
+ return dump_offset;
}
/* Parses the idle check rules and returns the number of characters printed.
@@ -5537,7 +6185,10 @@ static u32 qed_parse_idle_chk_dump_rules(struct qed_hwfn *p_hwfn,
char *results_buf,
u32 *num_errors, u32 *num_warnings)
{
- u32 rule_idx, results_offset = 0; /* Offset in results_buf in bytes */
+ /* Offset in results_buf in bytes */
+ u32 results_offset = 0;
+
+ u32 rule_idx;
u16 i, j;
*num_errors = 0;
@@ -5548,16 +6199,15 @@ static u32 qed_parse_idle_chk_dump_rules(struct qed_hwfn *p_hwfn,
rule_idx++) {
const struct dbg_idle_chk_rule_parsing_data *rule_parsing_data;
struct dbg_idle_chk_result_hdr *hdr;
- const char *parsing_str;
+ const char *parsing_str, *lsi_msg;
u32 parsing_str_offset;
- const char *lsi_msg;
- u8 curr_reg_id = 0;
bool has_fw_msg;
+ u8 curr_reg_id;
hdr = (struct dbg_idle_chk_result_hdr *)dump_buf;
rule_parsing_data =
(const struct dbg_idle_chk_rule_parsing_data *)
- &s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA].
+ &s_user_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA].
ptr[hdr->rule_id];
parsing_str_offset =
GET_FIELD(rule_parsing_data->data,
@@ -5565,16 +6215,18 @@ static u32 qed_parse_idle_chk_dump_rules(struct qed_hwfn *p_hwfn,
has_fw_msg =
GET_FIELD(rule_parsing_data->data,
DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG) > 0;
- parsing_str = &((const char *)
- s_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr)
- [parsing_str_offset];
+ parsing_str =
+ &((const char *)
+ s_user_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr)
+ [parsing_str_offset];
lsi_msg = parsing_str;
+ curr_reg_id = 0;
if (hdr->severity >= MAX_DBG_IDLE_CHK_SEVERITY_TYPES)
return 0;
/* Skip rule header */
- dump_buf += (sizeof(struct dbg_idle_chk_result_hdr) / 4);
+ dump_buf += BYTES_TO_DWORDS(sizeof(*hdr));
/* Update errors/warnings count */
if (hdr->severity == IDLE_CHK_SEVERITY_ERROR ||
@@ -5606,19 +6258,19 @@ static u32 qed_parse_idle_chk_dump_rules(struct qed_hwfn *p_hwfn,
for (i = 0;
i < hdr->num_dumped_cond_regs + hdr->num_dumped_info_regs;
i++) {
- struct dbg_idle_chk_result_reg_hdr *reg_hdr
- = (struct dbg_idle_chk_result_reg_hdr *)
- dump_buf;
- bool is_mem =
- GET_FIELD(reg_hdr->data,
- DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM);
- u8 reg_id =
- GET_FIELD(reg_hdr->data,
- DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID);
+ struct dbg_idle_chk_result_reg_hdr *reg_hdr;
+ bool is_mem;
+ u8 reg_id;
+
+ reg_hdr =
+ (struct dbg_idle_chk_result_reg_hdr *)dump_buf;
+ is_mem = GET_FIELD(reg_hdr->data,
+ DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM);
+ reg_id = GET_FIELD(reg_hdr->data,
+ DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID);
/* Skip reg header */
- dump_buf +=
- (sizeof(struct dbg_idle_chk_result_reg_hdr) / 4);
+ dump_buf += BYTES_TO_DWORDS(sizeof(*reg_hdr));
/* Skip register names until the required reg_id is
* reached.
@@ -5660,6 +6312,7 @@ static u32 qed_parse_idle_chk_dump_rules(struct qed_hwfn *p_hwfn,
/* Check if end of dump buffer was exceeded */
if (dump_buf > dump_buf_end)
return 0;
+
return results_offset;
}
@@ -5680,13 +6333,16 @@ static enum dbg_status qed_parse_idle_chk_dump(struct qed_hwfn *p_hwfn,
const char *section_name, *param_name, *param_str_val;
u32 *dump_buf_end = dump_buf + num_dumped_dwords;
u32 num_section_params = 0, num_rules;
- u32 results_offset = 0; /* Offset in results_buf in bytes */
+
+ /* Offset in results_buf in bytes */
+ u32 results_offset = 0;
*parsed_results_bytes = 0;
*num_errors = 0;
*num_warnings = 0;
- if (!s_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr ||
- !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA].ptr)
+
+ if (!s_user_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr ||
+ !s_user_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA].ptr)
return DBG_STATUS_DBG_ARRAY_NOT_SET;
/* Read global_params section */
@@ -5705,10 +6361,9 @@ static enum dbg_status qed_parse_idle_chk_dump(struct qed_hwfn *p_hwfn,
&section_name, &num_section_params);
if (strcmp(section_name, "idle_chk") || num_section_params != 1)
return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
-
dump_buf += qed_read_param(dump_buf,
&param_name, &param_str_val, &num_rules);
- if (strcmp(param_name, "num_rules") != 0)
+ if (strcmp(param_name, "num_rules"))
return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
if (num_rules) {
@@ -5728,7 +6383,7 @@ static enum dbg_status qed_parse_idle_chk_dump(struct qed_hwfn *p_hwfn,
results_offset : NULL,
num_errors, num_warnings);
results_offset += rules_print_size;
- if (rules_print_size == 0)
+ if (!rules_print_size)
return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
/* Print LSI output */
@@ -5745,64 +6400,33 @@ static enum dbg_status qed_parse_idle_chk_dump(struct qed_hwfn *p_hwfn,
results_offset : NULL,
num_errors, num_warnings);
results_offset += rules_print_size;
- if (rules_print_size == 0)
+ if (!rules_print_size)
return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
}
/* Print errors/warnings count */
- if (*num_errors) {
+ if (*num_errors)
results_offset +=
sprintf(qed_get_buf_ptr(results_buf,
results_offset),
"\nIdle Check failed!!! (with %d errors and %d warnings)\n",
*num_errors, *num_warnings);
- } else if (*num_warnings) {
+ else if (*num_warnings)
results_offset +=
sprintf(qed_get_buf_ptr(results_buf,
results_offset),
- "\nIdle Check completed successfuly (with %d warnings)\n",
+ "\nIdle Check completed successfully (with %d warnings)\n",
*num_warnings);
- } else {
+ else
results_offset +=
sprintf(qed_get_buf_ptr(results_buf,
results_offset),
- "\nIdle Check completed successfuly\n");
- }
+ "\nIdle Check completed successfully\n");
/* Add 1 for string NULL termination */
*parsed_results_bytes = results_offset + 1;
- return DBG_STATUS_OK;
-}
-enum dbg_status qed_get_idle_chk_results_buf_size(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- u32 num_dumped_dwords,
- u32 *results_buf_size)
-{
- u32 num_errors, num_warnings;
-
- return qed_parse_idle_chk_dump(p_hwfn,
- dump_buf,
- num_dumped_dwords,
- NULL,
- results_buf_size,
- &num_errors, &num_warnings);
-}
-
-enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- u32 num_dumped_dwords,
- char *results_buf,
- u32 *num_errors, u32 *num_warnings)
-{
- u32 parsed_buf_size;
-
- return qed_parse_idle_chk_dump(p_hwfn,
- dump_buf,
- num_dumped_dwords,
- results_buf,
- &parsed_buf_size,
- num_errors, num_warnings);
+ return DBG_STATUS_OK;
}
/* Frees the specified MCP Trace meta data */
@@ -5841,12 +6465,10 @@ static enum dbg_status qed_mcp_trace_alloc_meta(struct qed_hwfn *p_hwfn,
/* Read first signature */
signature = qed_read_dword_from_buf(meta_buf_bytes, &offset);
- if (signature != MCP_TRACE_META_IMAGE_SIGNATURE)
+ if (signature != NVM_MAGIC_VALUE)
return DBG_STATUS_INVALID_TRACE_SIGNATURE;
- /* Read number of modules and allocate memory for all the modules
- * pointers.
- */
+ /* Read no. of modules and allocate memory for their pointers */
meta->modules_num = qed_read_byte_from_buf(meta_buf_bytes, &offset);
meta->modules = kzalloc(meta->modules_num * sizeof(char *), GFP_KERNEL);
if (!meta->modules)
@@ -5871,7 +6493,7 @@ static enum dbg_status qed_mcp_trace_alloc_meta(struct qed_hwfn *p_hwfn,
/* Read second signature */
signature = qed_read_dword_from_buf(meta_buf_bytes, &offset);
- if (signature != MCP_TRACE_META_IMAGE_SIGNATURE)
+ if (signature != NVM_MAGIC_VALUE)
return DBG_STATUS_INVALID_TRACE_SIGNATURE;
/* Read number of formats and allocate memory for all formats */
@@ -5919,10 +6541,10 @@ static enum dbg_status qed_parse_mcp_trace_dump(struct qed_hwfn *p_hwfn,
char *results_buf,
u32 *parsed_results_bytes)
{
- u32 results_offset = 0, param_mask, param_shift, param_num_val;
- u32 num_section_params, offset, end_offset, bytes_left;
+ u32 end_offset, bytes_left, trace_data_dwords, trace_meta_dwords;
+ u32 param_mask, param_shift, param_num_val, num_section_params;
const char *section_name, *param_name, *param_str_val;
- u32 trace_data_dwords, trace_meta_dwords;
+ u32 offset, results_offset = 0;
struct mcp_trace_meta meta;
struct mcp_trace *trace;
enum dbg_status status;
@@ -5955,7 +6577,7 @@ static enum dbg_status qed_parse_mcp_trace_dump(struct qed_hwfn *p_hwfn,
/* Prepare trace info */
trace = (struct mcp_trace *)dump_buf;
- trace_buf = (u8 *)dump_buf + sizeof(struct mcp_trace);
+ trace_buf = (u8 *)dump_buf + sizeof(*trace);
offset = trace->trace_oldest;
end_offset = trace->trace_prod;
bytes_left = qed_cyclic_sub(end_offset, offset, trace->size);
@@ -5968,7 +6590,7 @@ static enum dbg_status qed_parse_mcp_trace_dump(struct qed_hwfn *p_hwfn,
return DBG_STATUS_MCP_TRACE_BAD_DATA;
dump_buf += qed_read_param(dump_buf,
&param_name, &param_str_val, &param_num_val);
- if (strcmp(param_name, "size") != 0)
+ if (strcmp(param_name, "size"))
return DBG_STATUS_MCP_TRACE_BAD_DATA;
trace_meta_dwords = param_num_val;
@@ -6028,6 +6650,7 @@ static enum dbg_status qed_parse_mcp_trace_dump(struct qed_hwfn *p_hwfn,
}
format_ptr = &meta.formats[format_idx];
+
for (i = 0,
param_mask = MCP_TRACE_FORMAT_P1_SIZE_MASK, param_shift =
MCP_TRACE_FORMAT_P1_SIZE_SHIFT;
@@ -6050,6 +6673,7 @@ static enum dbg_status qed_parse_mcp_trace_dump(struct qed_hwfn *p_hwfn,
*/
if (param_size == 3)
param_size = 4;
+
if (bytes_left < param_size) {
status = DBG_STATUS_MCP_TRACE_BAD_DATA;
goto free_mem;
@@ -6059,13 +6683,14 @@ static enum dbg_status qed_parse_mcp_trace_dump(struct qed_hwfn *p_hwfn,
&offset,
trace->size,
param_size);
+
bytes_left -= param_size;
}
format_level =
(u8)((format_ptr->data &
MCP_TRACE_FORMAT_LEVEL_MASK) >>
- MCP_TRACE_FORMAT_LEVEL_SHIFT);
+ MCP_TRACE_FORMAT_LEVEL_SHIFT);
format_module =
(u8)((format_ptr->data &
MCP_TRACE_FORMAT_MODULE_MASK) >>
@@ -6094,30 +6719,6 @@ free_mem:
return status;
}
-enum dbg_status qed_get_mcp_trace_results_buf_size(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- u32 num_dumped_dwords,
- u32 *results_buf_size)
-{
- return qed_parse_mcp_trace_dump(p_hwfn,
- dump_buf,
- num_dumped_dwords,
- NULL, results_buf_size);
-}
-
-enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- u32 num_dumped_dwords,
- char *results_buf)
-{
- u32 parsed_buf_size;
-
- return qed_parse_mcp_trace_dump(p_hwfn,
- dump_buf,
- num_dumped_dwords,
- results_buf, &parsed_buf_size);
-}
-
/* Parses a Reg FIFO dump buffer.
* If result_buf is not NULL, the Reg FIFO results are printed to it.
* In any case, the required results buffer size is assigned to
@@ -6130,10 +6731,11 @@ static enum dbg_status qed_parse_reg_fifo_dump(struct qed_hwfn *p_hwfn,
char *results_buf,
u32 *parsed_results_bytes)
{
- u32 results_offset = 0, param_num_val, num_section_params, num_elements;
const char *section_name, *param_name, *param_str_val;
+ u32 param_num_val, num_section_params, num_elements;
struct reg_fifo_element *elements;
u8 i, j, err_val, vf_val;
+ u32 results_offset = 0;
char vf_str[4];
/* Read global_params section */
@@ -6179,17 +6781,17 @@ static enum dbg_status qed_parse_reg_fifo_dump(struct qed_hwfn *p_hwfn,
"raw: 0x%016llx, address: 0x%07x, access: %-5s, pf: %2d, vf: %s, port: %d, privilege: %-3s, protection: %-12s, master: %-4s, errors: ",
elements[i].data,
(u32)GET_FIELD(elements[i].data,
- REG_FIFO_ELEMENT_ADDRESS) *
- REG_FIFO_ELEMENT_ADDR_FACTOR,
- s_access_strs[GET_FIELD(elements[i].data,
+ REG_FIFO_ELEMENT_ADDRESS) *
+ REG_FIFO_ELEMENT_ADDR_FACTOR,
+ s_access_strs[GET_FIELD(elements[i].data,
REG_FIFO_ELEMENT_ACCESS)],
(u32)GET_FIELD(elements[i].data,
- REG_FIFO_ELEMENT_PF), vf_str,
+ REG_FIFO_ELEMENT_PF),
+ vf_str,
(u32)GET_FIELD(elements[i].data,
- REG_FIFO_ELEMENT_PORT),
- s_privilege_strs[GET_FIELD(elements[i].
- data,
- REG_FIFO_ELEMENT_PRIVILEGE)],
+ REG_FIFO_ELEMENT_PORT),
+ s_privilege_strs[GET_FIELD(elements[i].data,
+ REG_FIFO_ELEMENT_PRIVILEGE)],
s_protection_strs[GET_FIELD(elements[i].data,
REG_FIFO_ELEMENT_PROTECTION)],
s_master_strs[GET_FIELD(elements[i].data,
@@ -6201,18 +6803,18 @@ static enum dbg_status qed_parse_reg_fifo_dump(struct qed_hwfn *p_hwfn,
REG_FIFO_ELEMENT_ERROR);
j < ARRAY_SIZE(s_reg_fifo_error_strs);
j++, err_val >>= 1) {
- if (!(err_val & 0x1))
- continue;
- if (err_printed)
+ if (err_val & 0x1) {
+ if (err_printed)
+ results_offset +=
+ sprintf(qed_get_buf_ptr
+ (results_buf,
+ results_offset), ", ");
results_offset +=
- sprintf(qed_get_buf_ptr(results_buf,
- results_offset),
- ", ");
- results_offset +=
- sprintf(qed_get_buf_ptr(results_buf,
- results_offset), "%s",
- s_reg_fifo_error_strs[j]);
- err_printed = true;
+ sprintf(qed_get_buf_ptr
+ (results_buf, results_offset), "%s",
+ s_reg_fifo_error_strs[j]);
+ err_printed = true;
+ }
}
results_offset +=
@@ -6225,31 +6827,140 @@ static enum dbg_status qed_parse_reg_fifo_dump(struct qed_hwfn *p_hwfn,
/* Add 1 for string NULL termination */
*parsed_results_bytes = results_offset + 1;
+
return DBG_STATUS_OK;
}
-enum dbg_status qed_get_reg_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- u32 num_dumped_dwords,
- u32 *results_buf_size)
+static enum dbg_status qed_parse_igu_fifo_element(struct igu_fifo_element
+ *element,
+ char *results_buf,
+ u32 *results_offset,
+ u32 *parsed_results_bytes)
{
- return qed_parse_reg_fifo_dump(p_hwfn,
- dump_buf,
- num_dumped_dwords,
- NULL, results_buf_size);
-}
+ const struct igu_fifo_addr_data *found_addr = NULL;
+ u8 source, err_type, i, is_cleanup;
+ char parsed_addr_data[32];
+ char parsed_wr_data[256];
+ u32 wr_data, prod_cons;
+ bool is_wr_cmd, is_pf;
+ u16 cmd_addr;
+ u64 dword12;
-enum dbg_status qed_print_reg_fifo_results(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- u32 num_dumped_dwords,
- char *results_buf)
-{
- u32 parsed_buf_size;
+ /* Dword12 (dword indices 1 and 2) contains bits 32..95 of the
+ * FIFO element.
+ */
+ dword12 = ((u64)element->dword2 << 32) | element->dword1;
+ is_wr_cmd = GET_FIELD(dword12, IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD);
+ is_pf = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_IS_PF);
+ cmd_addr = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR);
+ source = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_SOURCE);
+ err_type = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE);
+
+ if (source >= ARRAY_SIZE(s_igu_fifo_source_strs))
+ return DBG_STATUS_IGU_FIFO_BAD_DATA;
+ if (err_type >= ARRAY_SIZE(s_igu_fifo_error_strs))
+ return DBG_STATUS_IGU_FIFO_BAD_DATA;
- return qed_parse_reg_fifo_dump(p_hwfn,
- dump_buf,
- num_dumped_dwords,
- results_buf, &parsed_buf_size);
+ /* Find address data */
+ for (i = 0; i < ARRAY_SIZE(s_igu_fifo_addr_data) && !found_addr; i++) {
+ const struct igu_fifo_addr_data *curr_addr =
+ &s_igu_fifo_addr_data[i];
+
+ if (cmd_addr >= curr_addr->start_addr && cmd_addr <=
+ curr_addr->end_addr)
+ found_addr = curr_addr;
+ }
+
+ if (!found_addr)
+ return DBG_STATUS_IGU_FIFO_BAD_DATA;
+
+ /* Prepare parsed address data */
+ switch (found_addr->type) {
+ case IGU_ADDR_TYPE_MSIX_MEM:
+ sprintf(parsed_addr_data, " vector_num = 0x%x", cmd_addr / 2);
+ break;
+ case IGU_ADDR_TYPE_WRITE_INT_ACK:
+ case IGU_ADDR_TYPE_WRITE_PROD_UPDATE:
+ sprintf(parsed_addr_data,
+ " SB = 0x%x", cmd_addr - found_addr->start_addr);
+ break;
+ default:
+ parsed_addr_data[0] = '\0';
+ }
+
+ if (!is_wr_cmd) {
+ parsed_wr_data[0] = '\0';
+ goto out;
+ }
+
+ /* Prepare parsed write data */
+ wr_data = GET_FIELD(dword12, IGU_FIFO_ELEMENT_DWORD12_WR_DATA);
+ prod_cons = GET_FIELD(wr_data, IGU_FIFO_WR_DATA_PROD_CONS);
+ is_cleanup = GET_FIELD(wr_data, IGU_FIFO_WR_DATA_CMD_TYPE);
+
+ if (source == IGU_SRC_ATTN) {
+ sprintf(parsed_wr_data, "prod: 0x%x, ", prod_cons);
+ } else {
+ if (is_cleanup) {
+ u8 cleanup_val, cleanup_type;
+
+ cleanup_val =
+ GET_FIELD(wr_data,
+ IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL);
+ cleanup_type =
+ GET_FIELD(wr_data,
+ IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE);
+
+ sprintf(parsed_wr_data,
+ "cmd_type: cleanup, cleanup_val: %s, cleanup_type : %d, ",
+ cleanup_val ? "set" : "clear",
+ cleanup_type);
+ } else {
+ u8 update_flag, en_dis_int_for_sb, segment;
+ u8 timer_mask;
+
+ update_flag = GET_FIELD(wr_data,
+ IGU_FIFO_WR_DATA_UPDATE_FLAG);
+ en_dis_int_for_sb =
+ GET_FIELD(wr_data,
+ IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB);
+ segment = GET_FIELD(wr_data,
+ IGU_FIFO_WR_DATA_SEGMENT);
+ timer_mask = GET_FIELD(wr_data,
+ IGU_FIFO_WR_DATA_TIMER_MASK);
+
+ sprintf(parsed_wr_data,
+ "cmd_type: prod/cons update, prod/cons: 0x%x, update_flag: %s, en_dis_int_for_sb : %s, segment : %s, timer_mask = %d, ",
+ prod_cons,
+ update_flag ? "update" : "nop",
+ en_dis_int_for_sb
+ ? (en_dis_int_for_sb == 1 ? "disable" : "nop")
+ : "enable",
+ segment ? "attn" : "regular",
+ timer_mask);
+ }
+ }
+out:
+ /* Add parsed element to parsed buffer */
+ *results_offset += sprintf(qed_get_buf_ptr(results_buf,
+ *results_offset),
+ "raw: 0x%01x%08x%08x, %s: %d, source : %s, type : %s, cmd_addr : 0x%x(%s%s), %serror: %s\n",
+ element->dword2, element->dword1,
+ element->dword0,
+ is_pf ? "pf" : "vf",
+ GET_FIELD(element->dword0,
+ IGU_FIFO_ELEMENT_DWORD0_FID),
+ s_igu_fifo_source_strs[source],
+ is_wr_cmd ? "wr" : "rd",
+ cmd_addr,
+ (!is_pf && found_addr->vf_desc)
+ ? found_addr->vf_desc
+ : found_addr->desc,
+ parsed_addr_data,
+ parsed_wr_data,
+ s_igu_fifo_error_strs[err_type]);
+
+ return DBG_STATUS_OK;
}
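For reference, a sketch (illustration only; the real field positions come from the IGU_FIFO_ELEMENT_* masks) of the 96-bit element assembly the parser above relies on:

	/* The three dwords of an IGU FIFO element form one 96-bit word;
	 * dword0 carries bits 0..31 (fid, source, cmd_addr, ...) and
	 * dword1/dword2 are folded into "dword12" for bits 32..95, so
	 * wr_data and is_wr_cmd can be pulled out with GET_FIELD.
	 */
	static u64 example_fold_dword12(const struct igu_fifo_element *e)
	{
		return ((u64)e->dword2 << 32) | e->dword1;
	}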
/* Parses an IGU FIFO dump buffer.
@@ -6264,12 +6975,12 @@ static enum dbg_status qed_parse_igu_fifo_dump(struct qed_hwfn *p_hwfn,
char *results_buf,
u32 *parsed_results_bytes)
{
- u32 results_offset = 0, param_num_val, num_section_params, num_elements;
const char *section_name, *param_name, *param_str_val;
+ u32 param_num_val, num_section_params, num_elements;
struct igu_fifo_element *elements;
- char parsed_addr_data[32];
- char parsed_wr_data[256];
- u8 i, j;
+ enum dbg_status status;
+ u32 results_offset = 0;
+ u8 i;
/* Read global_params section */
dump_buf += qed_read_section_hdr(dump_buf,
@@ -6298,118 +7009,12 @@ static enum dbg_status qed_parse_igu_fifo_dump(struct qed_hwfn *p_hwfn,
/* Decode elements */
for (i = 0; i < num_elements; i++) {
- /* dword12 (dword index 1 and 2) contains bits 32..95 of the
- * FIFO element.
- */
- u64 dword12 =
- ((u64)elements[i].dword2 << 32) | elements[i].dword1;
- bool is_wr_cmd = GET_FIELD(dword12,
- IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD);
- bool is_pf = GET_FIELD(elements[i].dword0,
- IGU_FIFO_ELEMENT_DWORD0_IS_PF);
- u16 cmd_addr = GET_FIELD(elements[i].dword0,
- IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR);
- u8 source = GET_FIELD(elements[i].dword0,
- IGU_FIFO_ELEMENT_DWORD0_SOURCE);
- u8 err_type = GET_FIELD(elements[i].dword0,
- IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE);
- const struct igu_fifo_addr_data *addr_data = NULL;
-
- if (source >= ARRAY_SIZE(s_igu_fifo_source_strs))
- return DBG_STATUS_IGU_FIFO_BAD_DATA;
- if (err_type >= ARRAY_SIZE(s_igu_fifo_error_strs))
- return DBG_STATUS_IGU_FIFO_BAD_DATA;
-
- /* Find address data */
- for (j = 0; j < ARRAY_SIZE(s_igu_fifo_addr_data) && !addr_data;
- j++)
- if (cmd_addr >= s_igu_fifo_addr_data[j].start_addr &&
- cmd_addr <= s_igu_fifo_addr_data[j].end_addr)
- addr_data = &s_igu_fifo_addr_data[j];
- if (!addr_data)
- return DBG_STATUS_IGU_FIFO_BAD_DATA;
-
- /* Prepare parsed address data */
- switch (addr_data->type) {
- case IGU_ADDR_TYPE_MSIX_MEM:
- sprintf(parsed_addr_data,
- " vector_num=0x%x", cmd_addr / 2);
- break;
- case IGU_ADDR_TYPE_WRITE_INT_ACK:
- case IGU_ADDR_TYPE_WRITE_PROD_UPDATE:
- sprintf(parsed_addr_data,
- " SB=0x%x", cmd_addr - addr_data->start_addr);
- break;
- default:
- parsed_addr_data[0] = '\0';
- }
-
- /* Prepare parsed write data */
- if (is_wr_cmd) {
- u32 wr_data = GET_FIELD(dword12,
- IGU_FIFO_ELEMENT_DWORD12_WR_DATA);
- u32 prod_cons = GET_FIELD(wr_data,
- IGU_FIFO_WR_DATA_PROD_CONS);
- u8 is_cleanup = GET_FIELD(wr_data,
- IGU_FIFO_WR_DATA_CMD_TYPE);
-
- if (source == IGU_SRC_ATTN) {
- sprintf(parsed_wr_data,
- "prod: 0x%x, ", prod_cons);
- } else {
- if (is_cleanup) {
- u8 cleanup_val = GET_FIELD(wr_data,
- IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL);
- u8 cleanup_type = GET_FIELD(wr_data,
- IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE);
-
- sprintf(parsed_wr_data,
- "cmd_type: cleanup, cleanup_val: %s, cleanup_type: %d, ",
- cleanup_val ? "set" : "clear",
- cleanup_type);
- } else {
- u8 update_flag = GET_FIELD(wr_data,
- IGU_FIFO_WR_DATA_UPDATE_FLAG);
- u8 en_dis_int_for_sb =
- GET_FIELD(wr_data,
- IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB);
- u8 segment = GET_FIELD(wr_data,
- IGU_FIFO_WR_DATA_SEGMENT);
- u8 timer_mask = GET_FIELD(wr_data,
- IGU_FIFO_WR_DATA_TIMER_MASK);
-
- sprintf(parsed_wr_data,
- "cmd_type: prod/cons update, prod/cons: 0x%x, update_flag: %s, en_dis_int_for_sb: %s, segment: %s, timer_mask=%d, ",
- prod_cons,
- update_flag ? "update" : "nop",
- en_dis_int_for_sb
- ? (en_dis_int_for_sb ==
- 1 ? "disable" : "nop") :
- "enable",
- segment ? "attn" : "regular",
- timer_mask);
- }
- }
- } else {
- parsed_wr_data[0] = '\0';
- }
-
- /* Add parsed element to parsed buffer */
- results_offset +=
- sprintf(qed_get_buf_ptr(results_buf,
- results_offset),
- "raw: 0x%01x%08x%08x, %s: %d, source: %s, type: %s, cmd_addr: 0x%x (%s%s), %serror: %s\n",
- elements[i].dword2, elements[i].dword1,
- elements[i].dword0,
- is_pf ? "pf" : "vf",
- GET_FIELD(elements[i].dword0,
- IGU_FIFO_ELEMENT_DWORD0_FID),
- s_igu_fifo_source_strs[source],
- is_wr_cmd ? "wr" : "rd", cmd_addr,
- (!is_pf && addr_data->vf_desc)
- ? addr_data->vf_desc : addr_data->desc,
- parsed_addr_data, parsed_wr_data,
- s_igu_fifo_error_strs[err_type]);
+ status = qed_parse_igu_fifo_element(&elements[i],
+ results_buf,
+ &results_offset,
+ parsed_results_bytes);
+ if (status != DBG_STATUS_OK)
+ return status;
}
results_offset += sprintf(qed_get_buf_ptr(results_buf,
@@ -6418,31 +7023,8 @@ static enum dbg_status qed_parse_igu_fifo_dump(struct qed_hwfn *p_hwfn,
/* Add 1 for string NULL termination */
*parsed_results_bytes = results_offset + 1;
- return DBG_STATUS_OK;
-}
-
-enum dbg_status qed_get_igu_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- u32 num_dumped_dwords,
- u32 *results_buf_size)
-{
- return qed_parse_igu_fifo_dump(p_hwfn,
- dump_buf,
- num_dumped_dwords,
- NULL, results_buf_size);
-}
-
-enum dbg_status qed_print_igu_fifo_results(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- u32 num_dumped_dwords,
- char *results_buf)
-{
- u32 parsed_buf_size;
- return qed_parse_igu_fifo_dump(p_hwfn,
- dump_buf,
- num_dumped_dwords,
- results_buf, &parsed_buf_size);
+ return DBG_STATUS_OK;
}
static enum dbg_status
@@ -6452,9 +7034,10 @@ qed_parse_protection_override_dump(struct qed_hwfn *p_hwfn,
char *results_buf,
u32 *parsed_results_bytes)
{
- u32 results_offset = 0, param_num_val, num_section_params, num_elements;
const char *section_name, *param_name, *param_str_val;
+ u32 param_num_val, num_section_params, num_elements;
struct protection_override_element *elements;
+ u32 results_offset = 0;
u8 i;
/* Read global_params section */
@@ -6477,7 +7060,7 @@ qed_parse_protection_override_dump(struct qed_hwfn *p_hwfn,
&param_name, &param_str_val, &param_num_val);
if (strcmp(param_name, "size"))
return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
- if (param_num_val % PROTECTION_OVERRIDE_ELEMENT_DWORDS != 0)
+ if (param_num_val % PROTECTION_OVERRIDE_ELEMENT_DWORDS)
return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
num_elements = param_num_val / PROTECTION_OVERRIDE_ELEMENT_DWORDS;
elements = (struct protection_override_element *)dump_buf;
@@ -6486,7 +7069,7 @@ qed_parse_protection_override_dump(struct qed_hwfn *p_hwfn,
for (i = 0; i < num_elements; i++) {
u32 address = GET_FIELD(elements[i].data,
PROTECTION_OVERRIDE_ELEMENT_ADDRESS) *
- PROTECTION_OVERRIDE_ELEMENT_ADDR_FACTOR;
+ PROTECTION_OVERRIDE_ELEMENT_ADDR_FACTOR;
results_offset +=
sprintf(qed_get_buf_ptr(results_buf,
@@ -6512,33 +7095,8 @@ qed_parse_protection_override_dump(struct qed_hwfn *p_hwfn,
/* Add 1 for string NULL termination */
*parsed_results_bytes = results_offset + 1;
- return DBG_STATUS_OK;
-}
-enum dbg_status
-qed_get_protection_override_results_buf_size(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- u32 num_dumped_dwords,
- u32 *results_buf_size)
-{
- return qed_parse_protection_override_dump(p_hwfn,
- dump_buf,
- num_dumped_dwords,
- NULL, results_buf_size);
-}
-
-enum dbg_status qed_print_protection_override_results(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- u32 num_dumped_dwords,
- char *results_buf)
-{
- u32 parsed_buf_size;
-
- return qed_parse_protection_override_dump(p_hwfn,
- dump_buf,
- num_dumped_dwords,
- results_buf,
- &parsed_buf_size);
+ return DBG_STATUS_OK;
}
/* Parses a FW Asserts dump buffer.
@@ -6553,7 +7111,7 @@ static enum dbg_status qed_parse_fw_asserts_dump(struct qed_hwfn *p_hwfn,
char *results_buf,
u32 *parsed_results_bytes)
{
- u32 results_offset = 0, num_section_params, param_num_val, i;
+ u32 num_section_params, param_num_val, i, results_offset = 0;
const char *param_name, *param_str_val, *section_name;
bool last_section_found = false;
@@ -6569,54 +7127,216 @@ static enum dbg_status qed_parse_fw_asserts_dump(struct qed_hwfn *p_hwfn,
dump_buf += qed_print_section_params(dump_buf,
num_section_params,
results_buf, &results_offset);
- while (!last_section_found) {
- const char *storm_letter = NULL;
- u32 storm_dump_size = 0;
+ while (!last_section_found) {
dump_buf += qed_read_section_hdr(dump_buf,
&section_name,
&num_section_params);
- if (!strcmp(section_name, "last")) {
- last_section_found = true;
- continue;
- } else if (strcmp(section_name, "fw_asserts")) {
- return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
- }
+ if (!strcmp(section_name, "fw_asserts")) {
+ /* Extract params */
+ const char *storm_letter = NULL;
+ u32 storm_dump_size = 0;
+
+ for (i = 0; i < num_section_params; i++) {
+ dump_buf += qed_read_param(dump_buf,
+ &param_name,
+ &param_str_val,
+ &param_num_val);
+ if (!strcmp(param_name, "storm"))
+ storm_letter = param_str_val;
+ else if (!strcmp(param_name, "size"))
+ storm_dump_size = param_num_val;
+ else
+ return
+ DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
+ }
- /* Extract params */
- for (i = 0; i < num_section_params; i++) {
- dump_buf += qed_read_param(dump_buf,
- &param_name,
- &param_str_val,
- &param_num_val);
- if (!strcmp(param_name, "storm"))
- storm_letter = param_str_val;
- else if (!strcmp(param_name, "size"))
- storm_dump_size = param_num_val;
- else
+ if (!storm_letter || !storm_dump_size)
return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
- }
- if (!storm_letter || !storm_dump_size)
- return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
-
- /* Print data */
- results_offset += sprintf(qed_get_buf_ptr(results_buf,
- results_offset),
- "\n%sSTORM_ASSERT: size=%d\n",
- storm_letter, storm_dump_size);
- for (i = 0; i < storm_dump_size; i++, dump_buf++)
+ /* Print data */
results_offset +=
sprintf(qed_get_buf_ptr(results_buf,
results_offset),
- "%08x\n", *dump_buf);
+ "\n%sSTORM_ASSERT: size=%d\n",
+ storm_letter, storm_dump_size);
+ for (i = 0; i < storm_dump_size; i++, dump_buf++)
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset),
+ "%08x\n", *dump_buf);
+ } else if (!strcmp(section_name, "last")) {
+ last_section_found = true;
+ } else {
+ return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
+ }
}
/* Add 1 for string NULL termination */
*parsed_results_bytes = results_offset + 1;
+
+ return DBG_STATUS_OK;
+}
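The parser above walks the dump as a sequence of named sections; an illustrative layout with hypothetical values (not taken from a real dump):

	global_params                    <- printed via qed_print_section_params()
	fw_asserts  storm="T" size=4     <- one section per Storm with asserts
	  <4 dwords of assert data>
	fw_asserts  storm="M" size=4
	  <4 dwords of assert data>
	last                             <- terminates the walk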
+
+/***************************** Public Functions *******************************/
+
+enum dbg_status qed_dbg_user_set_bin_ptr(const u8 * const bin_ptr)
+{
+ struct bin_buffer_hdr *buf_array = (struct bin_buffer_hdr *)bin_ptr;
+ u8 buf_id;
+
+ /* Convert binary data to debug arrays */
+ for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++) {
+ s_user_dbg_arrays[buf_id].ptr =
+ (u32 *)(bin_ptr + buf_array[buf_id].offset);
+ s_user_dbg_arrays[buf_id].size_in_dwords =
+ BYTES_TO_DWORDS(buf_array[buf_id].length);
+ }
+
return DBG_STATUS_OK;
}
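The conversion above assumes the blob begins with an array of bin_buffer_hdr entries whose offsets point back into the same blob — roughly (a sketch, not the firmware tool's documented format):

	+--------------------------------+  <- bin_ptr
	| hdr[0] { offset, length }      |
	| ...                            |
	| hdr[MAX_BIN_DBG_BUFFER_TYPE-1] |
	+--------------------------------+
	| debug array 0 (dwords)         |  <- bin_ptr + hdr[0].offset
	| ...                            |
	+--------------------------------+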
+const char *qed_dbg_get_status_str(enum dbg_status status)
+{
+ return (status <
+ MAX_DBG_STATUS) ? s_status_str[status] : "Invalid debug status";
+}
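A hypothetical caller (sketch only, with buf/size_dw prepared via the usual size-query step) would typically route any non-OK status through this helper when logging:

	enum dbg_status rc;

	rc = qed_dbg_igu_fifo_dump(p_hwfn, p_ptt, buf, size_dw, &dumped_dw);
	if (rc != DBG_STATUS_OK)
		DP_NOTICE(p_hwfn, "IGU FIFO dump failed: %s\n",
			  qed_dbg_get_status_str(rc));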
+
+enum dbg_status qed_get_idle_chk_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ u32 *results_buf_size)
+{
+ u32 num_errors, num_warnings;
+
+ return qed_parse_idle_chk_dump(p_hwfn,
+ dump_buf,
+ num_dumped_dwords,
+ NULL,
+ results_buf_size,
+ &num_errors, &num_warnings);
+}
+
+enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf,
+ u32 *num_errors, u32 *num_warnings)
+{
+ u32 parsed_buf_size;
+
+ return qed_parse_idle_chk_dump(p_hwfn,
+ dump_buf,
+ num_dumped_dwords,
+ results_buf,
+ &parsed_buf_size,
+ num_errors, num_warnings);
+}
+
+void qed_dbg_mcp_trace_set_meta_data(u32 *data, u32 size)
+{
+ s_mcp_trace_meta.ptr = data;
+ s_mcp_trace_meta.size_in_dwords = size;
+}
+
+enum dbg_status qed_get_mcp_trace_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ u32 *results_buf_size)
+{
+ return qed_parse_mcp_trace_dump(p_hwfn,
+ dump_buf,
+ num_dumped_dwords,
+ NULL, results_buf_size);
+}
+
+enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf)
+{
+ u32 parsed_buf_size;
+
+ return qed_parse_mcp_trace_dump(p_hwfn,
+ dump_buf,
+ num_dumped_dwords,
+ results_buf, &parsed_buf_size);
+}
+
+enum dbg_status qed_get_reg_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ u32 *results_buf_size)
+{
+ return qed_parse_reg_fifo_dump(p_hwfn,
+ dump_buf,
+ num_dumped_dwords,
+ NULL, results_buf_size);
+}
+
+enum dbg_status qed_print_reg_fifo_results(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf)
+{
+ u32 parsed_buf_size;
+
+ return qed_parse_reg_fifo_dump(p_hwfn,
+ dump_buf,
+ num_dumped_dwords,
+ results_buf, &parsed_buf_size);
+}
+
+enum dbg_status qed_get_igu_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ u32 *results_buf_size)
+{
+ return qed_parse_igu_fifo_dump(p_hwfn,
+ dump_buf,
+ num_dumped_dwords,
+ NULL, results_buf_size);
+}
+
+enum dbg_status qed_print_igu_fifo_results(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf)
+{
+ u32 parsed_buf_size;
+
+ return qed_parse_igu_fifo_dump(p_hwfn,
+ dump_buf,
+ num_dumped_dwords,
+ results_buf, &parsed_buf_size);
+}
+
+enum dbg_status
+qed_get_protection_override_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ u32 *results_buf_size)
+{
+ return qed_parse_protection_override_dump(p_hwfn,
+ dump_buf,
+ num_dumped_dwords,
+ NULL, results_buf_size);
+}
+
+enum dbg_status qed_print_protection_override_results(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf)
+{
+ u32 parsed_buf_size;
+
+ return qed_parse_protection_override_dump(p_hwfn,
+ dump_buf,
+ num_dumped_dwords,
+ results_buf,
+ &parsed_buf_size);
+}
+
enum dbg_status qed_get_fw_asserts_results_buf_size(struct qed_hwfn *p_hwfn,
u32 *dump_buf,
u32 num_dumped_dwords,
@@ -6641,6 +7361,88 @@ enum dbg_status qed_print_fw_asserts_results(struct qed_hwfn *p_hwfn,
results_buf, &parsed_buf_size);
}
+enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn,
+ struct dbg_attn_block_result *results)
+{
+ struct user_dbg_array *block_attn, *pstrings;
+ const u32 *block_attn_name_offsets;
+ enum dbg_attn_type attn_type;
+ const char *block_name;
+ u8 num_regs, i, j;
+
+ num_regs = GET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_NUM_REGS);
+ attn_type = (enum dbg_attn_type)
+ GET_FIELD(results->data,
+ DBG_ATTN_BLOCK_RESULT_ATTN_TYPE);
+ block_name = s_block_info_arr[results->block_id].name;
+
+ if (!s_user_dbg_arrays[BIN_BUF_DBG_ATTN_INDEXES].ptr ||
+ !s_user_dbg_arrays[BIN_BUF_DBG_ATTN_NAME_OFFSETS].ptr ||
+ !s_user_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr)
+ return DBG_STATUS_DBG_ARRAY_NOT_SET;
+
+ block_attn = &s_user_dbg_arrays[BIN_BUF_DBG_ATTN_NAME_OFFSETS];
+ block_attn_name_offsets = &block_attn->ptr[results->names_offset];
+
+ /* Go over registers with a non-zero attention status */
+ for (i = 0; i < num_regs; i++) {
+ struct dbg_attn_reg_result *reg_result;
+ struct dbg_attn_bit_mapping *mapping;
+ u8 num_reg_attn, bit_idx = 0;
+
+ reg_result = &results->reg_results[i];
+ num_reg_attn = GET_FIELD(reg_result->data,
+ DBG_ATTN_REG_RESULT_NUM_REG_ATTN);
+ block_attn = &s_user_dbg_arrays[BIN_BUF_DBG_ATTN_INDEXES];
+ mapping = &((struct dbg_attn_bit_mapping *)
+ block_attn->ptr)[reg_result->block_attn_offset];
+
+ pstrings = &s_user_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS];
+
+ /* Go over attention status bits */
+ for (j = 0; j < num_reg_attn; j++) {
+ u16 attn_idx_val = GET_FIELD(mapping[j].data,
+ DBG_ATTN_BIT_MAPPING_VAL);
+ const char *attn_name, *attn_type_str, *masked_str;
+ u32 name_offset, sts_addr;
+
+ /* Check if bit mask should be advanced (due to unused
+ * bits).
+ */
+ if (GET_FIELD(mapping[j].data,
+ DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT)) {
+ bit_idx += (u8)attn_idx_val;
+ continue;
+ }
+
+ /* Check current bit index */
+ if (!(reg_result->sts_val & BIT(bit_idx))) {
+ bit_idx++;
+ continue;
+ }
+
+ /* Find attention name */
+ name_offset = block_attn_name_offsets[attn_idx_val];
+ attn_name = &((const char *)
+ pstrings->ptr)[name_offset];
+ attn_type_str = attn_type == ATTN_TYPE_INTERRUPT ?
+ "Interrupt" : "Parity";
+ masked_str = reg_result->mask_val & BIT(bit_idx) ?
+ " [masked]" : "";
+ sts_addr = GET_FIELD(reg_result->data,
+ DBG_ATTN_REG_RESULT_STS_ADDRESS);
+ DP_NOTICE(p_hwfn,
+ "%s (%s) : %s [address 0x%08x, bit %d]%s\n",
+ block_name, attn_type_str, attn_name,
+ sts_addr, bit_idx, masked_str);
+
+ bit_idx++;
+ }
+ }
+
+ return DBG_STATUS_OK;
+}
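A minimal sketch of how the two halves fit together (hypothetical caller): collect one block's interrupt attentions with qed_dbg_read_attn(), then render them with the parser above.

	struct dbg_attn_block_result res = {};

	if (qed_dbg_read_attn(p_hwfn, p_ptt, BLOCK_IGU, ATTN_TYPE_INTERRUPT,
			      false, &res) == DBG_STATUS_OK)
		qed_dbg_parse_attn(p_hwfn, &res);	/* logs via DP_NOTICE */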
+
/* Wrapper for unifying the idle_chk and mcp_trace APIs */
static enum dbg_status
qed_print_idle_chk_results_wrapper(struct qed_hwfn *p_hwfn,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.h b/drivers/net/ethernet/qlogic/qed/qed_debug.h
index f872d7324814..ea1cc8eaa125 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_debug.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_debug.h
@@ -20,6 +20,9 @@ enum qed_dbg_features {
DBG_FEATURE_NUM
};
+/* Forward Declaration */
+struct qed_dev;
+
int qed_dbg_grc(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes);
int qed_dbg_grc_size(struct qed_dev *cdev);
int qed_dbg_idle_chk(struct qed_dev *cdev, void *buffer,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 463927f17032..6c87bed13bd2 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -62,19 +62,13 @@
#include "qed_sp.h"
#include "qed_sriov.h"
#include "qed_vf.h"
-#include "qed_roce.h"
+#include "qed_rdma.h"
static DEFINE_SPINLOCK(qm_lock);
#define QED_MIN_DPIS (4)
#define QED_MIN_PWM_REGION (QED_WID_SIZE * QED_MIN_DPIS)
-/* API common to all protocols */
-enum BAR_ID {
- BAR_ID_0, /* used for GRC */
- BAR_ID_1 /* Used for doorbells */
-};
-
static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, enum BAR_ID bar_id)
{
@@ -83,7 +77,7 @@ static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn,
u32 val;
if (IS_VF(p_hwfn->cdev))
- return 1 << 17;
+ return qed_vf_hw_bar_size(p_hwfn, bar_id);
val = qed_rd(p_hwfn, p_ptt, bar_reg);
if (val)
@@ -154,13 +148,17 @@ void qed_resc_free(struct qed_dev *cdev)
{
int i;
- if (IS_VF(cdev))
+ if (IS_VF(cdev)) {
+ for_each_hwfn(cdev, i)
+ qed_l2_free(&cdev->hwfns[i]);
return;
+ }
kfree(cdev->fw_data);
cdev->fw_data = NULL;
kfree(cdev->reset_stats);
+ cdev->reset_stats = NULL;
for_each_hwfn(cdev, i) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
@@ -168,20 +166,21 @@ void qed_resc_free(struct qed_dev *cdev)
qed_cxt_mngr_free(p_hwfn);
qed_qm_info_free(p_hwfn);
qed_spq_free(p_hwfn);
- qed_eq_free(p_hwfn, p_hwfn->p_eq);
- qed_consq_free(p_hwfn, p_hwfn->p_consq);
+ qed_eq_free(p_hwfn);
+ qed_consq_free(p_hwfn);
qed_int_free(p_hwfn);
#ifdef CONFIG_QED_LL2
- qed_ll2_free(p_hwfn, p_hwfn->p_ll2_info);
+ qed_ll2_free(p_hwfn);
#endif
if (p_hwfn->hw_info.personality == QED_PCI_FCOE)
- qed_fcoe_free(p_hwfn, p_hwfn->p_fcoe_info);
+ qed_fcoe_free(p_hwfn);
if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
- qed_iscsi_free(p_hwfn, p_hwfn->p_iscsi_info);
- qed_ooo_free(p_hwfn, p_hwfn->p_ooo_info);
+ qed_iscsi_free(p_hwfn);
+ qed_ooo_free(p_hwfn);
}
qed_iov_free(p_hwfn);
+ qed_l2_free(p_hwfn);
qed_dmae_info_free(p_hwfn);
qed_dcbx_info_free(p_hwfn);
}
@@ -217,6 +216,10 @@ static u32 qed_get_pq_flags(struct qed_hwfn *p_hwfn)
case QED_PCI_ETH_ROCE:
flags |= PQ_FLAGS_MCOS | PQ_FLAGS_OFLD | PQ_FLAGS_LLT;
break;
+ case QED_PCI_ETH_IWARP:
+ flags |= PQ_FLAGS_MCOS | PQ_FLAGS_ACK | PQ_FLAGS_OOO |
+ PQ_FLAGS_OFLD;
+ break;
default:
DP_ERR(p_hwfn,
"unknown personality %d\n", p_hwfn->hw_info.personality);
@@ -299,7 +302,7 @@ static void qed_init_qm_params(struct qed_hwfn *p_hwfn)
qm_info->vport_wfq_en = 1;
/* TC config is different for AH 4 port */
- four_port = p_hwfn->cdev->num_ports_in_engines == MAX_NUM_PORTS_K2;
+ four_port = p_hwfn->cdev->num_ports_in_engine == MAX_NUM_PORTS_K2;
/* in AH 4 port we have fewer TCs per port */
qm_info->max_phys_tcs_per_port = four_port ? NUM_PHYS_TCS_4PORT_K2 :
@@ -328,7 +331,7 @@ static void qed_init_qm_vport_params(struct qed_hwfn *p_hwfn)
static void qed_init_qm_port_params(struct qed_hwfn *p_hwfn)
{
/* Initialize qm port parameters */
- u8 i, active_phys_tcs, num_ports = p_hwfn->cdev->num_ports_in_engines;
+ u8 i, active_phys_tcs, num_ports = p_hwfn->cdev->num_ports_in_engine;
/* indicate how ooo and high pri traffic is dealt with */
active_phys_tcs = num_ports == MAX_NUM_PORTS_K2 ?
@@ -692,7 +695,7 @@ static void qed_dp_init_qm_params(struct qed_hwfn *p_hwfn)
qm_info->num_pf_rls, qed_get_pq_flags(p_hwfn));
/* port table */
- for (i = 0; i < p_hwfn->cdev->num_ports_in_engines; i++) {
+ for (i = 0; i < p_hwfn->cdev->num_ports_in_engine; i++) {
port = &(qm_info->qm_port_params[i]);
DP_VERBOSE(p_hwfn,
NETIF_MSG_HW,
@@ -822,7 +825,7 @@ static int qed_alloc_qm_data(struct qed_hwfn *p_hwfn)
goto alloc_err;
qm_info->qm_port_params = kzalloc(sizeof(*qm_info->qm_port_params) *
- p_hwfn->cdev->num_ports_in_engines,
+ p_hwfn->cdev->num_ports_in_engine,
GFP_KERNEL);
if (!qm_info->qm_port_params)
goto alloc_err;
@@ -843,20 +846,18 @@ alloc_err:
int qed_resc_alloc(struct qed_dev *cdev)
{
- struct qed_iscsi_info *p_iscsi_info;
- struct qed_fcoe_info *p_fcoe_info;
- struct qed_ooo_info *p_ooo_info;
-#ifdef CONFIG_QED_LL2
- struct qed_ll2_info *p_ll2_info;
-#endif
u32 rdma_tasks, excess_tasks;
- struct qed_consq *p_consq;
- struct qed_eq *p_eq;
u32 line_count;
int i, rc = 0;
- if (IS_VF(cdev))
+ if (IS_VF(cdev)) {
+ for_each_hwfn(cdev, i) {
+ rc = qed_l2_alloc(&cdev->hwfns[i]);
+ if (rc)
+ return rc;
+ }
return rc;
+ }
cdev->fw_data = kzalloc(sizeof(*cdev->fw_data), GFP_KERNEL);
if (!cdev->fw_data)
@@ -939,9 +940,16 @@ int qed_resc_alloc(struct qed_dev *cdev)
/* EQ */
n_eqes = qed_chain_get_capacity(&p_hwfn->p_spq->chain);
- if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) {
+ if (QED_IS_RDMA_PERSONALITY(p_hwfn)) {
+ enum protocol_type rdma_proto;
+
+ if (QED_IS_ROCE_PERSONALITY(p_hwfn))
+ rdma_proto = PROTOCOLID_ROCE;
+ else
+ rdma_proto = PROTOCOLID_IWARP;
+
num_cons = qed_cxt_get_proto_cid_count(p_hwfn,
- PROTOCOLID_ROCE,
+ rdma_proto,
NULL) * 2;
n_eqes += num_cons + 2 * MAX_NUM_VFS_BB;
} else if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
@@ -956,45 +964,42 @@ int qed_resc_alloc(struct qed_dev *cdev)
DP_ERR(p_hwfn,
"Cannot allocate 0x%x EQ elements. The maximum of a u16 chain is 0x%x\n",
n_eqes, 0xFFFF);
- rc = -EINVAL;
- goto alloc_err;
+ goto alloc_no_mem;
}
- p_eq = qed_eq_alloc(p_hwfn, (u16) n_eqes);
- if (!p_eq)
- goto alloc_no_mem;
- p_hwfn->p_eq = p_eq;
+ rc = qed_eq_alloc(p_hwfn, (u16) n_eqes);
+ if (rc)
+ goto alloc_err;
- p_consq = qed_consq_alloc(p_hwfn);
- if (!p_consq)
- goto alloc_no_mem;
- p_hwfn->p_consq = p_consq;
+ rc = qed_consq_alloc(p_hwfn);
+ if (rc)
+ goto alloc_err;
+
+ rc = qed_l2_alloc(p_hwfn);
+ if (rc)
+ goto alloc_err;
#ifdef CONFIG_QED_LL2
if (p_hwfn->using_ll2) {
- p_ll2_info = qed_ll2_alloc(p_hwfn);
- if (!p_ll2_info)
- goto alloc_no_mem;
- p_hwfn->p_ll2_info = p_ll2_info;
+ rc = qed_ll2_alloc(p_hwfn);
+ if (rc)
+ goto alloc_err;
}
#endif
if (p_hwfn->hw_info.personality == QED_PCI_FCOE) {
- p_fcoe_info = qed_fcoe_alloc(p_hwfn);
- if (!p_fcoe_info)
- goto alloc_no_mem;
- p_hwfn->p_fcoe_info = p_fcoe_info;
+ rc = qed_fcoe_alloc(p_hwfn);
+ if (rc)
+ goto alloc_err;
}
if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
- p_iscsi_info = qed_iscsi_alloc(p_hwfn);
- if (!p_iscsi_info)
- goto alloc_no_mem;
- p_hwfn->p_iscsi_info = p_iscsi_info;
- p_ooo_info = qed_ooo_alloc(p_hwfn);
- if (!p_ooo_info)
- goto alloc_no_mem;
- p_hwfn->p_ooo_info = p_ooo_info;
+ rc = qed_iscsi_alloc(p_hwfn);
+ if (rc)
+ goto alloc_err;
+ rc = qed_ooo_alloc(p_hwfn);
+ if (rc)
+ goto alloc_err;
}
/* DMA info initialization */
@@ -1025,16 +1030,19 @@ void qed_resc_setup(struct qed_dev *cdev)
{
int i;
- if (IS_VF(cdev))
+ if (IS_VF(cdev)) {
+ for_each_hwfn(cdev, i)
+ qed_l2_setup(&cdev->hwfns[i]);
return;
+ }
for_each_hwfn(cdev, i) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
qed_cxt_mngr_setup(p_hwfn);
qed_spq_setup(p_hwfn);
- qed_eq_setup(p_hwfn, p_hwfn->p_eq);
- qed_consq_setup(p_hwfn, p_hwfn->p_consq);
+ qed_eq_setup(p_hwfn);
+ qed_consq_setup(p_hwfn);
/* Read shadow of current MFW mailbox */
qed_mcp_read_mb(p_hwfn, p_hwfn->p_main_ptt);
@@ -1044,17 +1052,18 @@ void qed_resc_setup(struct qed_dev *cdev)
qed_int_setup(p_hwfn, p_hwfn->p_main_ptt);
- qed_iov_setup(p_hwfn, p_hwfn->p_main_ptt);
+ qed_l2_setup(p_hwfn);
+ qed_iov_setup(p_hwfn);
#ifdef CONFIG_QED_LL2
if (p_hwfn->using_ll2)
- qed_ll2_setup(p_hwfn, p_hwfn->p_ll2_info);
+ qed_ll2_setup(p_hwfn);
#endif
if (p_hwfn->hw_info.personality == QED_PCI_FCOE)
- qed_fcoe_setup(p_hwfn, p_hwfn->p_fcoe_info);
+ qed_fcoe_setup(p_hwfn);
if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
- qed_iscsi_setup(p_hwfn, p_hwfn->p_iscsi_info);
- qed_ooo_setup(p_hwfn, p_hwfn->p_ooo_info);
+ qed_iscsi_setup(p_hwfn);
+ qed_ooo_setup(p_hwfn);
}
}
}
@@ -1122,7 +1131,7 @@ static int qed_calc_hw_mode(struct qed_hwfn *p_hwfn)
return -EINVAL;
}
- switch (p_hwfn->cdev->num_ports_in_engines) {
+ switch (p_hwfn->cdev->num_ports_in_engine) {
case 1:
hw_mode |= 1 << MODE_PORTS_PER_ENG_1;
break;
@@ -1134,7 +1143,7 @@ static int qed_calc_hw_mode(struct qed_hwfn *p_hwfn)
break;
default:
DP_NOTICE(p_hwfn, "num_ports_in_engine = %d not supported\n",
- p_hwfn->cdev->num_ports_in_engines);
+ p_hwfn->cdev->num_ports_in_engine);
return -EINVAL;
}
@@ -1169,7 +1178,7 @@ static int qed_calc_hw_mode(struct qed_hwfn *p_hwfn)
static void qed_init_cau_rt_data(struct qed_dev *cdev)
{
u32 offset = CAU_REG_SB_VAR_MEMORY_RT_OFFSET;
- int i, sb_id;
+ int i, igu_sb_id;
for_each_hwfn(cdev, i) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
@@ -1179,15 +1188,17 @@ static void qed_init_cau_rt_data(struct qed_dev *cdev)
p_igu_info = p_hwfn->hw_info.p_igu_info;
- for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(cdev);
- sb_id++) {
- p_block = &p_igu_info->igu_map.igu_blocks[sb_id];
+ for (igu_sb_id = 0;
+ igu_sb_id < QED_MAPPING_MEMORY_SIZE(cdev); igu_sb_id++) {
+ p_block = &p_igu_info->entry[igu_sb_id];
+
if (!p_block->is_pf)
continue;
qed_init_cau_sb_entry(p_hwfn, &sb_entry,
p_block->function_id, 0, 0);
- STORE_RT_REG_AGG(p_hwfn, offset + sb_id * 2, sb_entry);
+ STORE_RT_REG_AGG(p_hwfn, offset + igu_sb_id * 2,
+ sb_entry);
}
}
}
@@ -1241,6 +1252,10 @@ static void qed_init_cache_line_size(struct qed_hwfn *p_hwfn,
L1_CACHE_BYTES, wr_mbs);
STORE_RT_REG(p_hwfn, PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET, val);
+ if (val > 0) {
+ STORE_RT_REG(p_hwfn, PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET, val);
+ STORE_RT_REG(p_hwfn, PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET, val);
+ }
}
static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
@@ -1267,7 +1282,7 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
}
memset(&params, 0, sizeof(params));
- params.max_ports_per_engine = p_hwfn->cdev->num_ports_in_engines;
+ params.max_ports_per_engine = p_hwfn->cdev->num_ports_in_engine;
params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
params.pf_rl_en = qm_info->pf_rl_en;
params.pf_wfq_en = qm_info->pf_wfq_en;
@@ -1447,8 +1462,15 @@ qed_hw_init_pf_doorbell_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
static int qed_hw_init_port(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, int hw_mode)
{
- return qed_init_run(p_hwfn, p_ptt, PHASE_PORT,
- p_hwfn->port_id, hw_mode);
+	int rc;
+
+ rc = qed_init_run(p_hwfn, p_ptt, PHASE_PORT, p_hwfn->port_id, hw_mode);
+ if (rc)
+ return rc;
+
+ qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_MASTER_WRITE_PAD_ENABLE, 0);
+
+ return 0;
}
static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
@@ -1527,7 +1549,8 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
qed_int_igu_enable(p_hwfn, p_ptt, int_mode);
/* send function start command */
- rc = qed_sp_pf_start(p_hwfn, p_tunn, p_hwfn->cdev->mf_mode,
+ rc = qed_sp_pf_start(p_hwfn, p_ptt, p_tunn,
+ p_hwfn->cdev->mf_mode,
allow_npar_tx_switch);
if (rc) {
DP_NOTICE(p_hwfn, "Function start ramrod failed\n");
@@ -1711,6 +1734,11 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
return mfw_rc;
}
+ /* Check if there is a DID mismatch between nvm-cfg/efuse */
+ if (param & FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR)
+ DP_NOTICE(p_hwfn,
+ "warning: device configuration is not supported on this board type. The device may not function as expected.\n");
+
/* send DCBX attention request command */
DP_VERBOSE(p_hwfn,
QED_MSG_DCB,
@@ -1956,6 +1984,13 @@ int qed_hw_start_fastpath(struct qed_hwfn *p_hwfn)
if (!p_ptt)
return -EAGAIN;
+	/* If RDMA info is allocated, RDMA has been initialized and should
+	 * be enabled in the searcher.
+	 */
+ if (p_hwfn->p_rdma_info &&
+ p_hwfn->b_rdma_enabled_in_prs)
+ qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 0x1);
+
/* Re-open incoming traffic */
qed_wr(p_hwfn, p_ptt, NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0);
qed_ptt_release(p_hwfn, p_ptt);
@@ -1968,6 +2003,7 @@ static void qed_hw_hwfn_free(struct qed_hwfn *p_hwfn)
{
qed_ptt_pool_free(p_hwfn);
kfree(p_hwfn->hw_info.p_igu_info);
+ p_hwfn->hw_info.p_igu_info = NULL;
}
/* Setup bar access */
@@ -2025,51 +2061,55 @@ static void get_function_id(struct qed_hwfn *p_hwfn)
static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
{
u32 *feat_num = p_hwfn->hw_info.feat_num;
- struct qed_sb_cnt_info sb_cnt_info;
+ struct qed_sb_cnt_info sb_cnt;
u32 non_l2_sbs = 0;
+ memset(&sb_cnt, 0, sizeof(sb_cnt));
+ qed_int_get_num_sbs(p_hwfn, &sb_cnt);
+
if (IS_ENABLED(CONFIG_QED_RDMA) &&
- p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) {
+ QED_IS_RDMA_PERSONALITY(p_hwfn)) {
		/* Each RoCE CNQ requires 1 status block + 1 CNQ. We divide
		 * the status blocks equally between L2 and RoCE, taking into
		 * account how many L2 queues / CNQs we have.
		 */
feat_num[QED_RDMA_CNQ] =
- min_t(u32, RESC_NUM(p_hwfn, QED_SB) / 2,
+ min_t(u32, sb_cnt.cnt / 2,
RESC_NUM(p_hwfn, QED_RDMA_CNQ_RAM));
non_l2_sbs = feat_num[QED_RDMA_CNQ];
}
-
- if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE ||
- p_hwfn->hw_info.personality == QED_PCI_ETH) {
+ if (QED_IS_L2_PERSONALITY(p_hwfn)) {
/* Start by allocating VF queues, then PF's */
- memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
- qed_int_get_num_sbs(p_hwfn, &sb_cnt_info);
feat_num[QED_VF_L2_QUE] = min_t(u32,
RESC_NUM(p_hwfn, QED_L2_QUEUE),
- sb_cnt_info.sb_iov_cnt);
+ sb_cnt.iov_cnt);
feat_num[QED_PF_L2_QUE] = min_t(u32,
- RESC_NUM(p_hwfn, QED_SB) -
- non_l2_sbs,
+ sb_cnt.cnt - non_l2_sbs,
RESC_NUM(p_hwfn,
QED_L2_QUEUE) -
FEAT_NUM(p_hwfn,
QED_VF_L2_QUE));
}
- if (p_hwfn->hw_info.personality == QED_PCI_ISCSI)
- feat_num[QED_ISCSI_CQ] = min_t(u32, RESC_NUM(p_hwfn, QED_SB),
+ if (QED_IS_FCOE_PERSONALITY(p_hwfn))
+ feat_num[QED_FCOE_CQ] = min_t(u32, sb_cnt.cnt,
+ RESC_NUM(p_hwfn,
+ QED_CMDQS_CQS));
+
+ if (QED_IS_ISCSI_PERSONALITY(p_hwfn))
+ feat_num[QED_ISCSI_CQ] = min_t(u32, sb_cnt.cnt,
RESC_NUM(p_hwfn,
QED_CMDQS_CQS));
DP_VERBOSE(p_hwfn,
NETIF_MSG_PROBE,
- "#PF_L2_QUEUES=%d VF_L2_QUEUES=%d #ROCE_CNQ=%d ISCSI_CQ=%d #SBS=%d\n",
+ "#PF_L2_QUEUES=%d VF_L2_QUEUES=%d #ROCE_CNQ=%d FCOE_CQ=%d ISCSI_CQ=%d #SBS=%d\n",
(int)FEAT_NUM(p_hwfn, QED_PF_L2_QUE),
(int)FEAT_NUM(p_hwfn, QED_VF_L2_QUE),
(int)FEAT_NUM(p_hwfn, QED_RDMA_CNQ),
+ (int)FEAT_NUM(p_hwfn, QED_FCOE_CQ),
(int)FEAT_NUM(p_hwfn, QED_ISCSI_CQ),
- RESC_NUM(p_hwfn, QED_SB));
+ (int)sb_cnt.cnt);
}
const char *qed_hw_get_resc_name(enum qed_resources res_id)
@@ -2188,7 +2228,6 @@ int qed_hw_get_dflt_resc(struct qed_hwfn *p_hwfn,
{
u8 num_funcs = p_hwfn->num_funcs_on_engine;
bool b_ah = QED_IS_AH(p_hwfn->cdev);
- struct qed_sb_cnt_info sb_cnt_info;
switch (res_id) {
case QED_L2_QUEUE:
@@ -2240,9 +2279,10 @@ int qed_hw_get_dflt_resc(struct qed_hwfn *p_hwfn,
*p_resc_num = 1;
break;
case QED_SB:
- memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
- qed_int_get_num_sbs(p_hwfn, &sb_cnt_info);
- *p_resc_num = sb_cnt_info.sb_cnt;
+ /* Since we want its value to reflect whether MFW supports
+ * the new scheme, have a default of 0.
+ */
+ *p_resc_num = 0;
break;
default:
return -EINVAL;
@@ -2252,7 +2292,7 @@ int qed_hw_get_dflt_resc(struct qed_hwfn *p_hwfn,
case QED_BDQ:
if (!*p_resc_num)
*p_resc_start = 0;
- else if (p_hwfn->cdev->num_ports_in_engines == 4)
+ else if (p_hwfn->cdev->num_ports_in_engine == 4)
*p_resc_start = p_hwfn->port_id;
else if (p_hwfn->hw_info.personality == QED_PCI_ISCSI)
*p_resc_start = p_hwfn->port_id;
@@ -2311,11 +2351,6 @@ static int __qed_hw_set_resc_info(struct qed_hwfn *p_hwfn,
goto out;
}
- /* Special handling for status blocks; Would be revised in future */
- if (res_id == QED_SB) {
- *p_resc_num -= 1;
- *p_resc_start -= p_hwfn->enabled_func_idx;
- }
out:
	/* PQs have to be divisible by 8 [that's the HW granularity].
	 * Reduce the number so it fits.
@@ -2413,6 +2448,10 @@ static int qed_hw_get_resc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
return -EINVAL;
}
+ /* This will also learn the number of SBs from MFW */
+ if (qed_int_igu_reset_cam(p_hwfn, p_ptt))
+ return -EINVAL;
+
qed_hw_set_feat(p_hwfn);
for (res_id = 0; res_id < QED_MAX_RESC; res_id++)
@@ -2669,15 +2708,15 @@ static void qed_hw_info_port_num_bb(struct qed_hwfn *p_hwfn,
port_mode = qed_rd(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB_B0);
if (port_mode < 3) {
- p_hwfn->cdev->num_ports_in_engines = 1;
+ p_hwfn->cdev->num_ports_in_engine = 1;
} else if (port_mode <= 5) {
- p_hwfn->cdev->num_ports_in_engines = 2;
+ p_hwfn->cdev->num_ports_in_engine = 2;
} else {
DP_NOTICE(p_hwfn, "PORT MODE: %d not supported\n",
- p_hwfn->cdev->num_ports_in_engines);
+			  port_mode);
- /* Default num_ports_in_engines to something */
- p_hwfn->cdev->num_ports_in_engines = 1;
+ /* Default num_ports_in_engine to something */
+ p_hwfn->cdev->num_ports_in_engine = 1;
}
}
@@ -2687,20 +2726,20 @@ static void qed_hw_info_port_num_ah(struct qed_hwfn *p_hwfn,
u32 port;
int i;
- p_hwfn->cdev->num_ports_in_engines = 0;
+ p_hwfn->cdev->num_ports_in_engine = 0;
for (i = 0; i < MAX_NUM_PORTS_K2; i++) {
port = qed_rd(p_hwfn, p_ptt,
CNIG_REG_NIG_PORT0_CONF_K2 + (i * 4));
if (port & 1)
- p_hwfn->cdev->num_ports_in_engines++;
+ p_hwfn->cdev->num_ports_in_engine++;
}
- if (!p_hwfn->cdev->num_ports_in_engines) {
+ if (!p_hwfn->cdev->num_ports_in_engine) {
DP_NOTICE(p_hwfn, "All NIG ports are inactive\n");
/* Default num_ports_in_engine to something */
- p_hwfn->cdev->num_ports_in_engines = 1;
+ p_hwfn->cdev->num_ports_in_engine = 1;
}
}
@@ -2819,12 +2858,6 @@ static int qed_get_dev_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
cdev->chip_num, cdev->chip_rev,
cdev->chip_bond_id, cdev->chip_metal);
- if (QED_IS_BB(cdev) && CHIP_REV_IS_A0(cdev)) {
- DP_NOTICE(cdev->hwfns,
- "The chip type/rev (BB A0) is not supported!\n");
- return -EINVAL;
- }
-
return 0;
}
@@ -3051,12 +3084,15 @@ static void qed_chain_free_pbl(struct qed_dev *cdev, struct qed_chain *p_chain)
}
pbl_size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
- dma_free_coherent(&cdev->pdev->dev,
- pbl_size,
- p_chain->pbl_sp.p_virt_table,
- p_chain->pbl_sp.p_phys_table);
+
+ if (!p_chain->b_external_pbl)
+ dma_free_coherent(&cdev->pdev->dev,
+ pbl_size,
+ p_chain->pbl_sp.p_virt_table,
+ p_chain->pbl_sp.p_phys_table);
out:
vfree(p_chain->pbl.pp_virt_addr_tbl);
+ p_chain->pbl.pp_virt_addr_tbl = NULL;
}
void qed_chain_free(struct qed_dev *cdev, struct qed_chain *p_chain)
@@ -3150,7 +3186,10 @@ qed_chain_alloc_single(struct qed_dev *cdev, struct qed_chain *p_chain)
return 0;
}
-static int qed_chain_alloc_pbl(struct qed_dev *cdev, struct qed_chain *p_chain)
+static int
+qed_chain_alloc_pbl(struct qed_dev *cdev,
+ struct qed_chain *p_chain,
+ struct qed_chain_ext_pbl *ext_pbl)
{
u32 page_cnt = p_chain->page_cnt, size, i;
dma_addr_t p_phys = 0, p_pbl_phys = 0;
@@ -3170,8 +3209,16 @@ static int qed_chain_alloc_pbl(struct qed_dev *cdev, struct qed_chain *p_chain)
* should be saved to allow its freeing during the error flow.
*/
size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
- p_pbl_virt = dma_alloc_coherent(&cdev->pdev->dev,
- size, &p_pbl_phys, GFP_KERNEL);
+
+ if (!ext_pbl) {
+ p_pbl_virt = dma_alloc_coherent(&cdev->pdev->dev,
+ size, &p_pbl_phys, GFP_KERNEL);
+ } else {
+ p_pbl_virt = ext_pbl->p_pbl_virt;
+ p_pbl_phys = ext_pbl->p_pbl_phys;
+ p_chain->b_external_pbl = true;
+ }
+
qed_chain_init_pbl_mem(p_chain, p_pbl_virt, p_pbl_phys,
pp_virt_addr_tbl);
if (!p_pbl_virt)
@@ -3204,7 +3251,10 @@ int qed_chain_alloc(struct qed_dev *cdev,
enum qed_chain_use_mode intended_use,
enum qed_chain_mode mode,
enum qed_chain_cnt_type cnt_type,
- u32 num_elems, size_t elem_size, struct qed_chain *p_chain)
+ u32 num_elems,
+ size_t elem_size,
+ struct qed_chain *p_chain,
+ struct qed_chain_ext_pbl *ext_pbl)
{
u32 page_cnt;
int rc = 0;
@@ -3235,7 +3285,7 @@ int qed_chain_alloc(struct qed_dev *cdev,
rc = qed_chain_alloc_single(cdev, p_chain);
break;
case QED_CHAIN_MODE_PBL:
- rc = qed_chain_alloc_pbl(cdev, p_chain);
+ rc = qed_chain_alloc_pbl(cdev, p_chain, ext_pbl);
break;
}
if (rc)
@@ -4074,10 +4124,21 @@ static int qed_device_num_ports(struct qed_dev *cdev)
if (cdev->num_hwfns > 1)
return 1;
- return cdev->num_ports_in_engines * qed_device_num_engines(cdev);
+ return cdev->num_ports_in_engine * qed_device_num_engines(cdev);
}
int qed_device_get_port_id(struct qed_dev *cdev)
{
return (QED_LEADING_HWFN(cdev)->abs_pf_id) % qed_device_num_ports(cdev);
}
+
+void qed_set_fw_mac_addr(__le16 *fw_msb,
+ __le16 *fw_mid, __le16 *fw_lsb, u8 *mac)
+{
+ ((u8 *)fw_msb)[0] = mac[1];
+ ((u8 *)fw_msb)[1] = mac[0];
+ ((u8 *)fw_mid)[0] = mac[3];
+ ((u8 *)fw_mid)[1] = mac[2];
+ ((u8 *)fw_lsb)[0] = mac[5];
+ ((u8 *)fw_lsb)[1] = mac[4];
+}
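
The qed_set_fw_mac_addr() helper above packs a MAC address into three
little-endian 16-bit firmware fields, two bytes per field with each pair
swapped. A minimal host-side illustration of the byte layout (the MAC value
is a hypothetical example; a little-endian host is assumed):

#include <stdio.h>

typedef unsigned char u8;
typedef unsigned short le16;	/* stand-in for __le16 on a LE host */

/* Same byte moves as qed_set_fw_mac_addr() in the patch above */
static void set_fw_mac_addr(le16 *fw_msb, le16 *fw_mid, le16 *fw_lsb, u8 *mac)
{
	((u8 *)fw_msb)[0] = mac[1];
	((u8 *)fw_msb)[1] = mac[0];
	((u8 *)fw_mid)[0] = mac[3];
	((u8 *)fw_mid)[1] = mac[2];
	((u8 *)fw_lsb)[0] = mac[5];
	((u8 *)fw_lsb)[1] = mac[4];
}

int main(void)
{
	u8 mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	le16 msb, mid, lsb;

	set_fw_mac_addr(&msb, &mid, &lsb, mac);
	printf("%04x %04x %04x\n", msb, mid, lsb);	/* 0011 2233 4455 */
	return 0;
}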
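
Similarly, qed_device_get_port_id() above reduces to abs_pf_id modulo the
device's port count, where devices with more than one hwfn report a single
port. A standalone sketch of that arithmetic with hypothetical example
values:

#include <stdio.h>

/* Mirrors qed_device_num_ports()/qed_device_get_port_id() above */
static int device_num_ports(int num_hwfns, int ports_in_engine, int engines)
{
	if (num_hwfns > 1)
		return 1;
	return ports_in_engine * engines;
}

int main(void)
{
	int ports = device_num_ports(1, 2, 2);	/* 2 ports/engine x 2 engines */
	int abs_pf_id = 6;			/* hypothetical PF index */

	printf("port id = %d\n", abs_pf_id % ports);	/* prints: port id = 2 */
	return 0;
}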
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
index 12d16c096e36..1f1df1bf127c 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
@@ -307,6 +307,7 @@ int qed_dmae_host2host(struct qed_hwfn *p_hwfn,
* @param num_elems
* @param elem_size
* @param p_chain
+ * @param ext_pbl - an optional external PBL to use instead of allocating one
*
* @return int
*/
@@ -315,7 +316,9 @@ qed_chain_alloc(struct qed_dev *cdev,
enum qed_chain_use_mode intended_use,
enum qed_chain_mode mode,
enum qed_chain_cnt_type cnt_type,
- u32 num_elems, size_t elem_size, struct qed_chain *p_chain);
+ u32 num_elems,
+ size_t elem_size,
+ struct qed_chain *p_chain, struct qed_chain_ext_pbl *ext_pbl);
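
From a caller's perspective the new argument is optional: passing NULL keeps
the old behavior (the PBL is DMA-allocated internally), while a populated
qed_chain_ext_pbl makes the chain adopt a caller-owned PBL that
qed_chain_free() will then leave alone. A hedged sketch under those
assumptions; the chain-mode/count arguments and the caller-owned buffers are
illustrative, not mandated by the API:

/* Sketch only; assumes qed driver context. my_pbl_virt/my_pbl_phys stand
 * for hypothetical caller-owned PBL memory, e.g. from dma_alloc_coherent().
 */
static int alloc_chain_with_external_pbl(struct qed_dev *cdev,
					 struct qed_chain *p_chain,
					 void *my_pbl_virt,
					 dma_addr_t my_pbl_phys)
{
	struct qed_chain_ext_pbl ext_pbl;

	ext_pbl.p_pbl_virt = my_pbl_virt;
	ext_pbl.p_pbl_phys = my_pbl_phys;

	/* Non-NULL ext_pbl: qed_chain_alloc_pbl() skips its internal
	 * dma_alloc_coherent() and sets b_external_pbl, so qed_chain_free()
	 * will not release the caller's PBL.
	 */
	return qed_chain_alloc(cdev, QED_CHAIN_USE_TO_CONSUME_PRODUCE,
			       QED_CHAIN_MODE_PBL, QED_CHAIN_CNT_TYPE_U16,
			       256, sizeof(u64), p_chain, &ext_pbl);
}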
/**
* @brief qed_chain_free - Free chain DMA memory
diff --git a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c
index 21a58fffd02b..df195c02b711 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c
@@ -43,7 +43,6 @@
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/string.h>
-#include <linux/version.h>
#include <linux/workqueue.h>
#include <linux/errno.h>
#include <linux/list.h>
@@ -142,6 +141,15 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn,
p_data = &p_ramrod->init_ramrod_data;
fcoe_pf_params = &p_hwfn->pf_params.fcoe_pf_params;
+ /* Sanity */
+ if (fcoe_pf_params->num_cqs > p_hwfn->hw_info.feat_num[QED_FCOE_CQ]) {
+ DP_ERR(p_hwfn,
+ "Cannot satisfy CQ amount. CQs requested %d, CQs available %d. Aborting function start\n",
+ fcoe_pf_params->num_cqs,
+ p_hwfn->hw_info.feat_num[QED_FCOE_CQ]);
+ return -EINVAL;
+ }
+
p_data->mtu = cpu_to_le16(fcoe_pf_params->mtu);
tmp = cpu_to_le16(fcoe_pf_params->sq_num_pbl_pages);
p_data->sq_num_pages_in_pbl = tmp;
@@ -184,7 +192,10 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn,
p_data->q_params.queue_relative_offset = (u8)tmp;
for (i = 0; i < fcoe_pf_params->num_cqs; i++) {
- tmp = cpu_to_le16(p_hwfn->sbs_info[i]->igu_sb_id);
+ u16 igu_sb_id;
+
+ igu_sb_id = qed_get_igu_sb_id(p_hwfn, i);
+ tmp = cpu_to_le16(igu_sb_id);
p_data->q_params.cq_cmdq_sb_num_arr[i] = tmp;
}
@@ -539,7 +550,7 @@ static void __iomem *qed_fcoe_get_secondary_bdq_prod(struct qed_hwfn *p_hwfn,
}
}
-struct qed_fcoe_info *qed_fcoe_alloc(struct qed_hwfn *p_hwfn)
+int qed_fcoe_alloc(struct qed_hwfn *p_hwfn)
{
struct qed_fcoe_info *p_fcoe_info;
@@ -547,19 +558,21 @@ struct qed_fcoe_info *qed_fcoe_alloc(struct qed_hwfn *p_hwfn)
p_fcoe_info = kzalloc(sizeof(*p_fcoe_info), GFP_KERNEL);
if (!p_fcoe_info) {
		DP_NOTICE(p_hwfn, "Failed to allocate qed_fcoe_info\n");
- return NULL;
+ return -ENOMEM;
}
INIT_LIST_HEAD(&p_fcoe_info->free_list);
- return p_fcoe_info;
+
+ p_hwfn->p_fcoe_info = p_fcoe_info;
+ return 0;
}
-void qed_fcoe_setup(struct qed_hwfn *p_hwfn, struct qed_fcoe_info *p_fcoe_info)
+void qed_fcoe_setup(struct qed_hwfn *p_hwfn)
{
struct fcoe_task_context *p_task_ctx = NULL;
int rc;
u32 i;
- spin_lock_init(&p_fcoe_info->lock);
+ spin_lock_init(&p_hwfn->p_fcoe_info->lock);
for (i = 0; i < p_hwfn->pf_params.fcoe_pf_params.num_tasks; i++) {
rc = qed_cxt_get_task_ctx(p_hwfn, i,
QED_CTX_WORKING_MEM,
@@ -577,15 +590,15 @@ void qed_fcoe_setup(struct qed_hwfn *p_hwfn, struct qed_fcoe_info *p_fcoe_info)
}
}
-void qed_fcoe_free(struct qed_hwfn *p_hwfn, struct qed_fcoe_info *p_fcoe_info)
+void qed_fcoe_free(struct qed_hwfn *p_hwfn)
{
struct qed_fcoe_conn *p_conn = NULL;
- if (!p_fcoe_info)
+ if (!p_hwfn->p_fcoe_info)
return;
- while (!list_empty(&p_fcoe_info->free_list)) {
- p_conn = list_first_entry(&p_fcoe_info->free_list,
+ while (!list_empty(&p_hwfn->p_fcoe_info->free_list)) {
+ p_conn = list_first_entry(&p_hwfn->p_fcoe_info->free_list,
struct qed_fcoe_conn, list_entry);
if (!p_conn)
break;
@@ -593,7 +606,8 @@ void qed_fcoe_free(struct qed_hwfn *p_hwfn, struct qed_fcoe_info *p_fcoe_info)
qed_fcoe_free_connection(p_hwfn, p_conn);
}
- kfree(p_fcoe_info);
+ kfree(p_hwfn->p_fcoe_info);
+ p_hwfn->p_fcoe_info = NULL;
}
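
The FCoE conversion above is one instance of the commit's wider convention:
per-protocol info structures now hang off the hwfn, allocators return
0/-errno instead of a pointer, and setup/free take only the hwfn. A
condensed, non-compilable fragment of the resulting life cycle (the
"alloc_err" label is a hypothetical caller-side name):

	rc = qed_fcoe_alloc(p_hwfn);	/* sets p_hwfn->p_fcoe_info */
	if (rc)
		goto alloc_err;

	qed_fcoe_setup(p_hwfn);		/* reads p_hwfn->p_fcoe_info */
	/* ... runtime ... */
	qed_fcoe_free(p_hwfn);		/* frees and NULLs p_fcoe_info */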
static int
@@ -734,6 +748,11 @@ static int qed_fill_fcoe_dev_info(struct qed_dev *cdev,
info->secondary_bdq_rq_addr =
qed_fcoe_get_secondary_bdq_prod(hwfn, BDQ_ID_RQ);
+ info->wwpn = hwfn->mcp_info->func_info.wwn_port;
+ info->wwnn = hwfn->mcp_info->func_info.wwn_node;
+
+ info->num_cqs = FEAT_NUM(hwfn, QED_FCOE_CQ);
+
return rc;
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_fcoe.h b/drivers/net/ethernet/qlogic/qed/qed_fcoe.h
index 472af34a171d..027a76ac839a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_fcoe.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_fcoe.h
@@ -49,29 +49,21 @@ struct qed_fcoe_info {
};
#if IS_ENABLED(CONFIG_QED_FCOE)
-struct qed_fcoe_info *qed_fcoe_alloc(struct qed_hwfn *p_hwfn);
+int qed_fcoe_alloc(struct qed_hwfn *p_hwfn);
-void qed_fcoe_setup(struct qed_hwfn *p_hwfn, struct qed_fcoe_info *p_fcoe_info);
+void qed_fcoe_setup(struct qed_hwfn *p_hwfn);
-void qed_fcoe_free(struct qed_hwfn *p_hwfn, struct qed_fcoe_info *p_fcoe_info);
+void qed_fcoe_free(struct qed_hwfn *p_hwfn);
void qed_get_protocol_stats_fcoe(struct qed_dev *cdev,
struct qed_mcp_fcoe_stats *stats);
#else /* CONFIG_QED_FCOE */
-static inline struct qed_fcoe_info *
-qed_fcoe_alloc(struct qed_hwfn *p_hwfn)
+static inline int qed_fcoe_alloc(struct qed_hwfn *p_hwfn)
{
- return NULL;
+ return -EINVAL;
}
-static inline void qed_fcoe_setup(struct qed_hwfn *p_hwfn,
- struct qed_fcoe_info *p_fcoe_info)
-{
-}
-
-static inline void qed_fcoe_free(struct qed_hwfn *p_hwfn,
- struct qed_fcoe_info *p_fcoe_info)
-{
-}
+static inline void qed_fcoe_setup(struct qed_hwfn *p_hwfn) {}
+static inline void qed_fcoe_free(struct qed_hwfn *p_hwfn) {}
static inline void qed_get_protocol_stats_fcoe(struct qed_dev *cdev,
struct qed_mcp_fcoe_stats *stats)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
index 858a57a73589..31fb0bffa098 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -46,6 +46,7 @@
#include <linux/qed/fcoe_common.h>
#include <linux/qed/eth_common.h>
#include <linux/qed/iscsi_common.h>
+#include <linux/qed/iwarp_common.h>
#include <linux/qed/rdma_common.h>
#include <linux/qed/roce_common.h>
#include <linux/qed/qed_fcoe_if.h>
@@ -346,7 +347,7 @@ struct xstorm_core_conn_ag_ctx {
u8 byte13;
u8 byte14;
u8 byte15;
- u8 byte16;
+ u8 e5_reserved;
__le16 word11;
__le32 reg10;
__le32 reg11;
@@ -368,85 +369,85 @@ struct tstorm_core_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define TSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
-#define TSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
-#define TSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
-#define TSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
-#define TSTORM_CORE_CONN_AG_CTX_BIT2_MASK 0x1
-#define TSTORM_CORE_CONN_AG_CTX_BIT2_SHIFT 2
-#define TSTORM_CORE_CONN_AG_CTX_BIT3_MASK 0x1
-#define TSTORM_CORE_CONN_AG_CTX_BIT3_SHIFT 3
-#define TSTORM_CORE_CONN_AG_CTX_BIT4_MASK 0x1
-#define TSTORM_CORE_CONN_AG_CTX_BIT4_SHIFT 4
-#define TSTORM_CORE_CONN_AG_CTX_BIT5_MASK 0x1
-#define TSTORM_CORE_CONN_AG_CTX_BIT5_SHIFT 5
-#define TSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
-#define TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 6
+#define TSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
+#define TSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
+#define TSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
+#define TSTORM_CORE_CONN_AG_CTX_BIT2_MASK 0x1 /* bit2 */
+#define TSTORM_CORE_CONN_AG_CTX_BIT2_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_BIT3_MASK 0x1 /* bit3 */
+#define TSTORM_CORE_CONN_AG_CTX_BIT3_SHIFT 3
+#define TSTORM_CORE_CONN_AG_CTX_BIT4_MASK 0x1 /* bit4 */
+#define TSTORM_CORE_CONN_AG_CTX_BIT4_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_BIT5_MASK 0x1 /* bit5 */
+#define TSTORM_CORE_CONN_AG_CTX_BIT5_SHIFT 5
+#define TSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
+#define TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 6
u8 flags1;
-#define TSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
-#define TSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 0
-#define TSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
-#define TSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 2
-#define TSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3
-#define TSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 4
-#define TSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3
-#define TSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 6
+#define TSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */
+#define TSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
+#define TSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */
+#define TSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
+#define TSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 6
u8 flags2;
-#define TSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3
-#define TSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 0
-#define TSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3
-#define TSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 2
-#define TSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3
-#define TSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 4
-#define TSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3
-#define TSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 6
+#define TSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
+#define TSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
+#define TSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */
+#define TSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */
+#define TSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 6
u8 flags3;
-#define TSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3
-#define TSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 0
-#define TSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3
-#define TSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 2
-#define TSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
-#define TSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 4
-#define TSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
-#define TSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 5
-#define TSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
-#define TSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 6
-#define TSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1
-#define TSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 7
+#define TSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */
+#define TSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */
+#define TSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
+#define TSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
+#define TSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 5
+#define TSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
+#define TSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 6
+#define TSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
+#define TSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 7
u8 flags4;
-#define TSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1
-#define TSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 0
-#define TSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1
-#define TSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 1
-#define TSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1
-#define TSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 2
-#define TSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1
-#define TSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 3
-#define TSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1
-#define TSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 4
-#define TSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1
-#define TSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 5
-#define TSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1
-#define TSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 6
-#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define TSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */
+#define TSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */
+#define TSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 1
+#define TSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */
+#define TSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */
+#define TSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 3
+#define TSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */
+#define TSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */
+#define TSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 5
+#define TSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */
+#define TSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 6
+#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
+#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags5;
-#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
+#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
+#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
+#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
+#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
+#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
+#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
+#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */
+#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
__le32 reg0;
__le32 reg1;
__le32 reg2;
@@ -681,7 +682,9 @@ struct core_rx_fast_path_cqe {
__le16 packet_length;
__le16 vlan;
struct core_rx_cqe_opaque_data opaque_data;
- __le32 reserved[4];
+ struct parsing_err_flags err_flags;
+ __le16 reserved0;
+ __le32 reserved1[3];
};
struct core_rx_gsi_offload_cqe {
@@ -692,7 +695,7 @@ struct core_rx_gsi_offload_cqe {
__le16 vlan;
__le32 src_mac_addrhi;
__le16 src_mac_addrlo;
- u8 reserved1[2];
+ __le16 qp_id;
__le32 gid_dst[4];
};
@@ -774,15 +777,15 @@ struct core_tx_bd {
__le16 bitfield1;
#define CORE_TX_BD_L4_HDR_OFFSET_W_MASK 0x3FFF
#define CORE_TX_BD_L4_HDR_OFFSET_W_SHIFT 0
-#define CORE_TX_BD_TX_DST_MASK 0x1
-#define CORE_TX_BD_TX_DST_SHIFT 14
-#define CORE_TX_BD_RESERVED_MASK 0x1
-#define CORE_TX_BD_RESERVED_SHIFT 15
+#define CORE_TX_BD_TX_DST_MASK 0x3
+#define CORE_TX_BD_TX_DST_SHIFT 14
};
enum core_tx_dest {
CORE_TX_DEST_NW,
CORE_TX_DEST_LB,
+ CORE_TX_DEST_RESERVED,
+ CORE_TX_DEST_DROP,
MAX_CORE_TX_DEST
};
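
With TX_DST widened from one bit to two, the descriptor can now encode the
added CORE_TX_DEST_DROP value. A hedged fragment showing how the field would
be programmed with the generic SET_FIELD() mask/shift helper from qed's HSI
headers ("bd" is a hypothetical descriptor being prepared):

	u16 bitfield1 = 0;

	SET_FIELD(bitfield1, CORE_TX_BD_L4_HDR_OFFSET_W, 0);
	SET_FIELD(bitfield1, CORE_TX_BD_TX_DST, CORE_TX_DEST_DROP);
	bd->bitfield1 = cpu_to_le16(bitfield1);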
@@ -804,12 +807,12 @@ struct core_tx_stop_ramrod_data {
__le32 reserved0[2];
};
-enum dcb_dhcp_update_flag {
- DONT_UPDATE_DCB_DHCP,
+enum dcb_dscp_update_mode {
+ DONT_UPDATE_DCB_DSCP,
UPDATE_DCB,
UPDATE_DSCP,
UPDATE_DCB_DSCP,
- MAX_DCB_DHCP_UPDATE_FLAG
+ MAX_DCB_DSCP_UPDATE_MODE
};
struct eth_mstorm_per_pf_stat {
@@ -917,6 +920,14 @@ struct hsi_fp_ver_struct {
u8 major_ver_arr[2];
};
+enum iwarp_ll2_tx_queues {
+ IWARP_LL2_IN_ORDER_TX_QUEUE = 1,
+ IWARP_LL2_ALIGNED_TX_QUEUE,
+ IWARP_LL2_ALIGNED_RIGHT_TRIMMED_TX_QUEUE,
+ IWARP_LL2_ERROR,
+ MAX_IWARP_LL2_TX_QUEUES
+};
+
/* Mstorm non-triggering VF zone */
enum malicious_vf_error_id {
MALICIOUS_VF_NO_ERROR,
@@ -960,7 +971,7 @@ enum personality_type {
PERSONALITY_ISCSI,
PERSONALITY_FCOE,
PERSONALITY_RDMA_AND_ETH,
- PERSONALITY_RESERVED3,
+ PERSONALITY_RDMA,
PERSONALITY_CORE,
PERSONALITY_ETH,
PERSONALITY_RESERVED4,
@@ -971,16 +982,12 @@ enum personality_type {
struct pf_start_tunnel_config {
u8 set_vxlan_udp_port_flg;
u8 set_geneve_udp_port_flg;
- u8 tx_enable_vxlan;
- u8 tx_enable_l2geneve;
- u8 tx_enable_ipgeneve;
- u8 tx_enable_l2gre;
- u8 tx_enable_ipgre;
u8 tunnel_clss_vxlan;
u8 tunnel_clss_l2geneve;
u8 tunnel_clss_ipgeneve;
u8 tunnel_clss_l2gre;
u8 tunnel_clss_ipgre;
+ u8 reserved;
__le16 vxlan_udp_port;
__le16 geneve_udp_port;
};
@@ -990,6 +997,7 @@ struct pf_start_ramrod_data {
struct regpair event_ring_pbl_addr;
struct regpair consolid_q_pbl_addr;
struct pf_start_tunnel_config tunnel_config;
+ __le32 reserved;
__le16 event_ring_sb_id;
u8 base_vf_id;
u8 num_vfs;
@@ -1007,7 +1015,6 @@ struct pf_start_ramrod_data {
u8 pri_map_valid;
__le32 outer_tag;
struct hsi_fp_ver_struct hsi_fp_ver;
-
};
struct protocol_dcb_data {
@@ -1023,14 +1030,8 @@ struct pf_update_tunnel_config {
u8 update_rx_pf_clss;
u8 update_rx_def_ucast_clss;
u8 update_rx_def_non_ucast_clss;
- u8 update_tx_pf_clss;
u8 set_vxlan_udp_port_flg;
u8 set_geneve_udp_port_flg;
- u8 tx_enable_vxlan;
- u8 tx_enable_l2geneve;
- u8 tx_enable_ipgeneve;
- u8 tx_enable_l2gre;
- u8 tx_enable_ipgre;
u8 tunnel_clss_vxlan;
u8 tunnel_clss_l2geneve;
u8 tunnel_clss_ipgeneve;
@@ -1038,17 +1039,17 @@ struct pf_update_tunnel_config {
u8 tunnel_clss_ipgre;
__le16 vxlan_udp_port;
__le16 geneve_udp_port;
- __le16 reserved[2];
+ __le16 reserved;
};
struct pf_update_ramrod_data {
u8 pf_id;
- u8 update_eth_dcb_data_flag;
- u8 update_fcoe_dcb_data_flag;
- u8 update_iscsi_dcb_data_flag;
- u8 update_roce_dcb_data_flag;
- u8 update_rroce_dcb_data_flag;
- u8 update_iwarp_dcb_data_flag;
+ u8 update_eth_dcb_data_mode;
+ u8 update_fcoe_dcb_data_mode;
+ u8 update_iscsi_dcb_data_mode;
+ u8 update_roce_dcb_data_mode;
+ u8 update_rroce_dcb_data_mode;
+ u8 update_iwarp_dcb_data_mode;
u8 update_mf_vlan_flag;
struct protocol_dcb_data eth_dcb_data;
struct protocol_dcb_data fcoe_dcb_data;
@@ -1127,7 +1128,7 @@ struct tstorm_per_port_stat {
struct regpair iscsi_irregular_pkt;
struct regpair fcoe_irregular_pkt;
struct regpair roce_irregular_pkt;
- struct regpair reserved;
+ struct regpair iwarp_irregular_pkt;
struct regpair eth_irregular_pkt;
struct regpair reserved1;
struct regpair preroce_irregular_pkt;
@@ -1326,6 +1327,87 @@ enum dmae_cmd_src_enum {
MAX_DMAE_CMD_SRC_ENUM
};
+struct mstorm_core_conn_ag_ctx {
+ u8 byte0;
+ u8 byte1;
+ u8 flags0;
+#define MSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
+#define MSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
+#define MSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
+#define MSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
+#define MSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
+#define MSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
+#define MSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
+#define MSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define MSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define MSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define MSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
+ __le16 word0;
+ __le16 word1;
+ __le32 reg0;
+ __le32 reg1;
+};
+
+struct ystorm_core_conn_ag_ctx {
+ u8 byte0;
+ u8 byte1;
+ u8 flags0;
+#define YSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
+#define YSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
+#define YSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
+#define YSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
+#define YSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
+#define YSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
+#define YSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
+#define YSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define YSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define YSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define YSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
+ u8 byte2;
+ u8 byte3;
+ __le16 word0;
+ __le32 reg0;
+ __le32 reg1;
+ __le16 word1;
+ __le16 word2;
+ __le16 word3;
+ __le16 word4;
+ __le32 reg2;
+ __le32 reg3;
+};
+
/* IGU cleanup command */
struct igu_cleanup {
__le32 sb_id_and_flags;
@@ -1389,44 +1471,6 @@ struct igu_msix_vector {
#define IGU_MSIX_VECTOR_RESERVED1_MASK 0xFF
#define IGU_MSIX_VECTOR_RESERVED1_SHIFT 24
};
-
-struct mstorm_core_conn_ag_ctx {
- u8 byte0;
- u8 byte1;
- u8 flags0;
-#define MSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
-#define MSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
-#define MSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
-#define MSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
-#define MSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
-#define MSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
-#define MSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
-#define MSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
-#define MSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
-#define MSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
- u8 flags1;
-#define MSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
-#define MSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
-#define MSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
-#define MSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
-#define MSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
-#define MSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
-#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
- __le16 word0;
- __le16 word1;
- __le32 reg0;
- __le32 reg1;
-};
-
/* per encapsulation type enabling flags */
struct prs_reg_encapsulation_type_en {
u8 flags;
@@ -1541,50 +1585,6 @@ struct sdm_op_gen {
#define SDM_OP_GEN_RESERVED_SHIFT 20
};
-struct ystorm_core_conn_ag_ctx {
- u8 byte0;
- u8 byte1;
- u8 flags0;
-#define YSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
-#define YSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
-#define YSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
-#define YSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
-#define YSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
-#define YSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
-#define YSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
-#define YSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
-#define YSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
-#define YSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
- u8 flags1;
-#define YSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
-#define YSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
-#define YSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
-#define YSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
-#define YSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
-#define YSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
-#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
- u8 byte2;
- u8 byte3;
- __le16 word0;
- __le32 reg0;
- __le32 reg1;
- __le16 word1;
- __le16 word2;
- __le16 word3;
- __le16 word4;
- __le32 reg2;
- __le32 reg3;
-};
-
/****************************************/
/* Debug Tools HSI constants and macros */
/****************************************/
@@ -1643,6 +1643,8 @@ enum block_addr {
GRCBASE_MULD = 0x4e0000,
GRCBASE_YULD = 0x4c8000,
GRCBASE_XYLD = 0x4c0000,
+ GRCBASE_PTLD = 0x590000,
+ GRCBASE_YPLD = 0x5b0000,
GRCBASE_PRM = 0x230000,
GRCBASE_PBF_PB1 = 0xda0000,
GRCBASE_PBF_PB2 = 0xda4000,
@@ -1656,6 +1658,10 @@ enum block_addr {
GRCBASE_TCFC = 0x2d0000,
GRCBASE_IGU = 0x180000,
GRCBASE_CAU = 0x1c0000,
+ GRCBASE_RGFS = 0xf00000,
+ GRCBASE_RGSRC = 0x320000,
+ GRCBASE_TGFS = 0xd00000,
+ GRCBASE_TGSRC = 0x322000,
GRCBASE_UMAC = 0x51000,
GRCBASE_XMAC = 0x210000,
GRCBASE_DBG = 0x10000,
@@ -1669,10 +1675,6 @@ enum block_addr {
GRCBASE_PHY_PCIE = 0x620000,
GRCBASE_LED = 0x6b8000,
GRCBASE_AVS_WRAP = 0x6b0000,
- GRCBASE_RGFS = 0x19d0000,
- GRCBASE_TGFS = 0x19e0000,
- GRCBASE_PTLD = 0x19f0000,
- GRCBASE_YPLD = 0x1a10000,
GRCBASE_MISC_AEU = 0x8000,
GRCBASE_BAR0_MAP = 0x1c00000,
MAX_BLOCK_ADDR
@@ -1732,6 +1734,8 @@ enum block_id {
BLOCK_MULD,
BLOCK_YULD,
BLOCK_XYLD,
+ BLOCK_PTLD,
+ BLOCK_YPLD,
BLOCK_PRM,
BLOCK_PBF_PB1,
BLOCK_PBF_PB2,
@@ -1745,6 +1749,10 @@ enum block_id {
BLOCK_TCFC,
BLOCK_IGU,
BLOCK_CAU,
+ BLOCK_RGFS,
+ BLOCK_RGSRC,
+ BLOCK_TGFS,
+ BLOCK_TGSRC,
BLOCK_UMAC,
BLOCK_XMAC,
BLOCK_DBG,
@@ -1758,10 +1766,6 @@ enum block_id {
BLOCK_PHY_PCIE,
BLOCK_LED,
BLOCK_AVS_WRAP,
- BLOCK_RGFS,
- BLOCK_TGFS,
- BLOCK_PTLD,
- BLOCK_YPLD,
BLOCK_MISC_AEU,
BLOCK_BAR0_MAP,
MAX_BLOCK_ID
@@ -1780,6 +1784,10 @@ enum bin_dbg_buffer_type {
BIN_BUF_DBG_ATTN_REGS,
BIN_BUF_DBG_ATTN_INDEXES,
BIN_BUF_DBG_ATTN_NAME_OFFSETS,
+ BIN_BUF_DBG_BUS_BLOCKS,
+ BIN_BUF_DBG_BUS_LINES,
+ BIN_BUF_DBG_BUS_BLOCKS_USER_DATA,
+ BIN_BUF_DBG_BUS_LINE_NAME_OFFSETS,
BIN_BUF_DBG_PARSING_STRINGS,
MAX_BIN_DBG_BUFFER_TYPE
};
@@ -1862,6 +1870,29 @@ enum dbg_attn_type {
MAX_DBG_ATTN_TYPE
};
+struct dbg_bus_block {
+ u8 num_of_lines;
+ u8 has_latency_events;
+ __le16 lines_offset;
+};
+
+struct dbg_bus_block_user_data {
+ u8 num_of_lines;
+ u8 has_latency_events;
+ __le16 names_offset;
+};
+
+struct dbg_bus_line {
+ u8 data;
+#define DBG_BUS_LINE_NUM_OF_GROUPS_MASK 0xF
+#define DBG_BUS_LINE_NUM_OF_GROUPS_SHIFT 0
+#define DBG_BUS_LINE_IS_256B_MASK 0x1
+#define DBG_BUS_LINE_IS_256B_SHIFT 4
+#define DBG_BUS_LINE_RESERVED_MASK 0x7
+#define DBG_BUS_LINE_RESERVED_SHIFT 5
+ u8 group_sizes;
+};
+
/* condition header for registers dump */
struct dbg_dump_cond_hdr {
struct dbg_mode_hdr mode; /* Mode header */
@@ -1879,17 +1910,21 @@ struct dbg_dump_mem {
__le32 dword1;
#define DBG_DUMP_MEM_LENGTH_MASK 0xFFFFFF
#define DBG_DUMP_MEM_LENGTH_SHIFT 0
-#define DBG_DUMP_MEM_RESERVED_MASK 0xFF
-#define DBG_DUMP_MEM_RESERVED_SHIFT 24
+#define DBG_DUMP_MEM_WIDE_BUS_MASK 0x1
+#define DBG_DUMP_MEM_WIDE_BUS_SHIFT 24
+#define DBG_DUMP_MEM_RESERVED_MASK 0x7F
+#define DBG_DUMP_MEM_RESERVED_SHIFT 25
};
/* register data for registers dump */
struct dbg_dump_reg {
__le32 data;
-#define DBG_DUMP_REG_ADDRESS_MASK 0xFFFFFF /* register address (in dwords) */
+#define DBG_DUMP_REG_ADDRESS_MASK 0x7FFFFF /* register address (in dwords) */
#define DBG_DUMP_REG_ADDRESS_SHIFT 0
-#define DBG_DUMP_REG_LENGTH_MASK 0xFF /* register size (in dwords) */
-#define DBG_DUMP_REG_LENGTH_SHIFT 24
+#define DBG_DUMP_REG_WIDE_BUS_MASK 0x1 /* indicates register is wide-bus */
+#define DBG_DUMP_REG_WIDE_BUS_SHIFT 23
+#define DBG_DUMP_REG_LENGTH_MASK 0xFF /* register size (in dwords) */
+#define DBG_DUMP_REG_LENGTH_SHIFT 24
};
/* split header for registers dump */
@@ -1910,20 +1945,24 @@ struct dbg_idle_chk_cond_hdr {
/* Idle Check condition register */
struct dbg_idle_chk_cond_reg {
__le32 data;
-#define DBG_IDLE_CHK_COND_REG_ADDRESS_MASK 0xFFFFFF
+#define DBG_IDLE_CHK_COND_REG_ADDRESS_MASK 0x7FFFFF
#define DBG_IDLE_CHK_COND_REG_ADDRESS_SHIFT 0
+#define DBG_IDLE_CHK_COND_REG_WIDE_BUS_MASK 0x1
+#define DBG_IDLE_CHK_COND_REG_WIDE_BUS_SHIFT 23
#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_MASK 0xFF
#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_SHIFT 24
- __le16 num_entries; /* number of registers entries to check */
- u8 entry_size; /* size of registers entry (in dwords) */
- u8 start_entry; /* index of the first entry to check */
+ __le16 num_entries;
+ u8 entry_size;
+ u8 start_entry;
};
/* Idle Check info register */
struct dbg_idle_chk_info_reg {
__le32 data;
-#define DBG_IDLE_CHK_INFO_REG_ADDRESS_MASK 0xFFFFFF
+#define DBG_IDLE_CHK_INFO_REG_ADDRESS_MASK 0x7FFFFF
#define DBG_IDLE_CHK_INFO_REG_ADDRESS_SHIFT 0
+#define DBG_IDLE_CHK_INFO_REG_WIDE_BUS_MASK 0x1
+#define DBG_IDLE_CHK_INFO_REG_WIDE_BUS_SHIFT 23
#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_MASK 0xFF
#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_SHIFT 24
__le16 size; /* register size in dwords */
@@ -1996,15 +2035,17 @@ enum dbg_idle_chk_severity_types {
/* Debug Bus block data */
struct dbg_bus_block_data {
- u8 enabled; /* Indicates if the block is enabled for recording (0/1) */
- u8 hw_id; /* HW ID associated with the block */
- u8 line_num; /* Debug line number to select */
- u8 right_shift; /* Number of units to right the debug data (0-3) */
- u8 cycle_en; /* 4-bit value: bit i set -> unit i is enabled. */
- u8 force_valid; /* 4-bit value: bit i set -> unit i is forced valid. */
- u8 force_frame; /* 4-bit value: bit i set -> unit i frame bit is forced.
- */
- u8 reserved;
+ __le16 data;
+#define DBG_BUS_BLOCK_DATA_ENABLE_MASK_MASK 0xF
+#define DBG_BUS_BLOCK_DATA_ENABLE_MASK_SHIFT 0
+#define DBG_BUS_BLOCK_DATA_RIGHT_SHIFT_MASK 0xF
+#define DBG_BUS_BLOCK_DATA_RIGHT_SHIFT_SHIFT 4
+#define DBG_BUS_BLOCK_DATA_FORCE_VALID_MASK_MASK 0xF
+#define DBG_BUS_BLOCK_DATA_FORCE_VALID_MASK_SHIFT 8
+#define DBG_BUS_BLOCK_DATA_FORCE_FRAME_MASK_MASK 0xF
+#define DBG_BUS_BLOCK_DATA_FORCE_FRAME_MASK_SHIFT 12
+ u8 line_num;
+ u8 hw_id;
};
/* Debug Bus Clients */
@@ -2045,6 +2086,14 @@ enum dbg_bus_constraint_ops {
MAX_DBG_BUS_CONSTRAINT_OPS
};
+struct dbg_bus_trigger_state_data {
+ u8 data;
+#define DBG_BUS_TRIGGER_STATE_DATA_BLOCK_SHIFTED_ENABLE_MASK_MASK 0xF
+#define DBG_BUS_TRIGGER_STATE_DATA_BLOCK_SHIFTED_ENABLE_MASK_SHIFT 0
+#define DBG_BUS_TRIGGER_STATE_DATA_CONSTRAINT_DWORD_MASK_MASK 0xF
+#define DBG_BUS_TRIGGER_STATE_DATA_CONSTRAINT_DWORD_MASK_SHIFT 4
+};
+
/* Debug Bus memory address */
struct dbg_bus_mem_addr {
__le32 lo;
@@ -2078,66 +2127,42 @@ union dbg_bus_storm_eid_params {
/* Debug Bus Storm data */
struct dbg_bus_storm_data {
- u8 fast_enabled;
- u8 fast_mode;
- u8 slow_enabled;
- u8 slow_mode;
+ u8 enabled;
+ u8 mode;
u8 hw_id;
u8 eid_filter_en;
u8 eid_range_not_mask;
u8 cid_filter_en;
union dbg_bus_storm_eid_params eid_filter_params;
- __le16 reserved;
__le32 cid;
};
/* Debug Bus data */
struct dbg_bus_data {
- __le32 app_version; /* The tools version number of the application */
- u8 state; /* The current debug bus state */
- u8 hw_dwords; /* HW dwords per cycle */
- u8 next_hw_id; /* Next HW ID to be associated with an input */
- u8 num_enabled_blocks; /* Number of blocks enabled for recording */
- u8 num_enabled_storms; /* Number of Storms enabled for recording */
- u8 target; /* Output target */
- u8 next_trigger_state; /* ID of next trigger state to be added */
- u8 next_constraint_id; /* ID of next filter/trigger constraint to be
- * added.
- */
- u8 one_shot_en; /* Indicates if one-shot mode is enabled (0/1) */
- u8 grc_input_en; /* Indicates if GRC recording is enabled (0/1) */
- u8 timestamp_input_en; /* Indicates if timestamp recording is enabled
- * (0/1).
- */
- u8 filter_en; /* Indicates if the recording filter is enabled (0/1) */
- u8 trigger_en; /* Indicates if the recording trigger is enabled (0/1) */
- u8 adding_filter; /* If true, the next added constraint belong to the
- * filter. Otherwise, it belongs to the last added
- * trigger state. Valid only if either filter or
- * triggers are enabled.
- */
- u8 filter_pre_trigger; /* Indicates if the recording filter should be
- * applied before the trigger. Valid only if both
- * filter and trigger are enabled (0/1).
- */
- u8 filter_post_trigger; /* Indicates if the recording filter should be
- * applied after the trigger. Valid only if both
- * filter and trigger are enabled (0/1).
- */
- u8 unify_inputs; /* If true, all inputs are associated with HW ID 0.
- * Otherwise, each input is assigned a different HW ID
- * (0/1).
- */
- u8 rcv_from_other_engine; /* Indicates if the other engine sends it NW
- * recording to this engine (0/1).
- */
- struct dbg_bus_pci_buf_data pci_buf; /* Debug Bus PCI buffer data. Valid
- * only when the target is
- * DBG_BUS_TARGET_ID_PCI.
- */
+ __le32 app_version;
+ u8 state;
+ u8 hw_dwords;
+ __le16 hw_id_mask;
+ u8 num_enabled_blocks;
+ u8 num_enabled_storms;
+ u8 target;
+ u8 one_shot_en;
+ u8 grc_input_en;
+ u8 timestamp_input_en;
+ u8 filter_en;
+ u8 adding_filter;
+ u8 filter_pre_trigger;
+ u8 filter_post_trigger;
__le16 reserved;
- struct dbg_bus_block_data blocks[88];/* Debug Bus data for each block */
- struct dbg_bus_storm_data storms[6]; /* Debug Bus data for each block */
+ u8 trigger_en;
+ struct dbg_bus_trigger_state_data trigger_states[3];
+ u8 next_trigger_state;
+ u8 next_constraint_id;
+ u8 unify_inputs;
+ u8 rcv_from_other_engine;
+ struct dbg_bus_pci_buf_data pci_buf;
+ struct dbg_bus_block_data blocks[88];
+ struct dbg_bus_storm_data storms[6];
};
enum dbg_bus_filter_types {
@@ -2156,12 +2181,6 @@ enum dbg_bus_frame_modes {
MAX_DBG_BUS_FRAME_MODES
};
-enum dbg_bus_input_types {
- DBG_BUS_INPUT_TYPE_STORM,
- DBG_BUS_INPUT_TYPE_BLOCK,
- MAX_DBG_BUS_INPUT_TYPES
-};
-
enum dbg_bus_other_engine_modes {
DBG_BUS_OTHER_ENGINE_MODE_NONE,
DBG_BUS_OTHER_ENGINE_MODE_DOUBLE_BW_TX,
@@ -2185,19 +2204,19 @@ enum dbg_bus_pre_trigger_types {
};
enum dbg_bus_semi_frame_modes {
	DBG_BUS_SEMI_FRAME_MODE_0SLOW_4FAST = 0,
	DBG_BUS_SEMI_FRAME_MODE_4SLOW_0FAST = 3,
MAX_DBG_BUS_SEMI_FRAME_MODES
};
/* Debug bus states */
enum dbg_bus_states {
- DBG_BUS_STATE_IDLE, /* debug bus idle state (not recording) */
- DBG_BUS_STATE_READY, /* debug bus is ready for configuration and
- * recording.
- */
- DBG_BUS_STATE_RECORDING, /* debug bus is currently recording */
- DBG_BUS_STATE_STOPPED, /* debug bus recording has stopped */
+ DBG_BUS_STATE_IDLE,
+ DBG_BUS_STATE_READY,
+ DBG_BUS_STATE_RECORDING,
+ DBG_BUS_STATE_STOPPED,
MAX_DBG_BUS_STATES
};
@@ -2216,11 +2235,8 @@ enum dbg_bus_storm_modes {
/* Debug bus target IDs */
enum dbg_bus_targets {
- /* records debug bus to DBG block internal buffer */
DBG_BUS_TARGET_ID_INT_BUF,
- /* records debug bus to the NW */
DBG_BUS_TARGET_ID_NIG,
- /* records debug bus to a PCI buffer */
DBG_BUS_TARGET_ID_PCI,
MAX_DBG_BUS_TARGETS
};
@@ -2235,48 +2251,45 @@ struct dbg_grc_data {
/* Debug GRC params */
enum dbg_grc_params {
- DBG_GRC_PARAM_DUMP_TSTORM, /* dump Tstorm memories (0/1) */
- DBG_GRC_PARAM_DUMP_MSTORM, /* dump Mstorm memories (0/1) */
- DBG_GRC_PARAM_DUMP_USTORM, /* dump Ustorm memories (0/1) */
- DBG_GRC_PARAM_DUMP_XSTORM, /* dump Xstorm memories (0/1) */
- DBG_GRC_PARAM_DUMP_YSTORM, /* dump Ystorm memories (0/1) */
- DBG_GRC_PARAM_DUMP_PSTORM, /* dump Pstorm memories (0/1) */
- DBG_GRC_PARAM_DUMP_REGS, /* dump non-memory registers (0/1) */
- DBG_GRC_PARAM_DUMP_RAM, /* dump Storm internal RAMs (0/1) */
- DBG_GRC_PARAM_DUMP_PBUF, /* dump Storm passive buffer (0/1) */
- DBG_GRC_PARAM_DUMP_IOR, /* dump Storm IORs (0/1) */
- DBG_GRC_PARAM_DUMP_VFC, /* dump VFC memories (0/1) */
- DBG_GRC_PARAM_DUMP_CM_CTX, /* dump CM contexts (0/1) */
- DBG_GRC_PARAM_DUMP_PXP, /* dump PXP memories (0/1) */
- DBG_GRC_PARAM_DUMP_RSS, /* dump RSS memories (0/1) */
- DBG_GRC_PARAM_DUMP_CAU, /* dump CAU memories (0/1) */
- DBG_GRC_PARAM_DUMP_QM, /* dump QM memories (0/1) */
- DBG_GRC_PARAM_DUMP_MCP, /* dump MCP memories (0/1) */
- DBG_GRC_PARAM_RESERVED, /* reserved */
- DBG_GRC_PARAM_DUMP_CFC, /* dump CFC memories (0/1) */
- DBG_GRC_PARAM_DUMP_IGU, /* dump IGU memories (0/1) */
- DBG_GRC_PARAM_DUMP_BRB, /* dump BRB memories (0/1) */
- DBG_GRC_PARAM_DUMP_BTB, /* dump BTB memories (0/1) */
- DBG_GRC_PARAM_DUMP_BMB, /* dump BMB memories (0/1) */
- DBG_GRC_PARAM_DUMP_NIG, /* dump NIG memories (0/1) */
- DBG_GRC_PARAM_DUMP_MULD, /* dump MULD memories (0/1) */
- DBG_GRC_PARAM_DUMP_PRS, /* dump PRS memories (0/1) */
- DBG_GRC_PARAM_DUMP_DMAE, /* dump PRS memories (0/1) */
- DBG_GRC_PARAM_DUMP_TM, /* dump TM (timers) memories (0/1) */
- DBG_GRC_PARAM_DUMP_SDM, /* dump SDM memories (0/1) */
- DBG_GRC_PARAM_DUMP_DIF, /* dump DIF memories (0/1) */
- DBG_GRC_PARAM_DUMP_STATIC, /* dump static debug data (0/1) */
- DBG_GRC_PARAM_UNSTALL, /* un-stall Storms after dump (0/1) */
- DBG_GRC_PARAM_NUM_LCIDS, /* number of LCIDs (0..320) */
- DBG_GRC_PARAM_NUM_LTIDS, /* number of LTIDs (0..320) */
- /* preset: exclude all memories from dump (1 only) */
+ DBG_GRC_PARAM_DUMP_TSTORM,
+ DBG_GRC_PARAM_DUMP_MSTORM,
+ DBG_GRC_PARAM_DUMP_USTORM,
+ DBG_GRC_PARAM_DUMP_XSTORM,
+ DBG_GRC_PARAM_DUMP_YSTORM,
+ DBG_GRC_PARAM_DUMP_PSTORM,
+ DBG_GRC_PARAM_DUMP_REGS,
+ DBG_GRC_PARAM_DUMP_RAM,
+ DBG_GRC_PARAM_DUMP_PBUF,
+ DBG_GRC_PARAM_DUMP_IOR,
+ DBG_GRC_PARAM_DUMP_VFC,
+ DBG_GRC_PARAM_DUMP_CM_CTX,
+ DBG_GRC_PARAM_DUMP_PXP,
+ DBG_GRC_PARAM_DUMP_RSS,
+ DBG_GRC_PARAM_DUMP_CAU,
+ DBG_GRC_PARAM_DUMP_QM,
+ DBG_GRC_PARAM_DUMP_MCP,
+ DBG_GRC_PARAM_RESERVED,
+ DBG_GRC_PARAM_DUMP_CFC,
+ DBG_GRC_PARAM_DUMP_IGU,
+ DBG_GRC_PARAM_DUMP_BRB,
+ DBG_GRC_PARAM_DUMP_BTB,
+ DBG_GRC_PARAM_DUMP_BMB,
+ DBG_GRC_PARAM_DUMP_NIG,
+ DBG_GRC_PARAM_DUMP_MULD,
+ DBG_GRC_PARAM_DUMP_PRS,
+ DBG_GRC_PARAM_DUMP_DMAE,
+ DBG_GRC_PARAM_DUMP_TM,
+ DBG_GRC_PARAM_DUMP_SDM,
+ DBG_GRC_PARAM_DUMP_DIF,
+ DBG_GRC_PARAM_DUMP_STATIC,
+ DBG_GRC_PARAM_UNSTALL,
+ DBG_GRC_PARAM_NUM_LCIDS,
+ DBG_GRC_PARAM_NUM_LTIDS,
DBG_GRC_PARAM_EXCLUDE_ALL,
- /* preset: include memories for crash dump (1 only) */
DBG_GRC_PARAM_CRASH,
- /* perform dump only if MFW is responding (0/1) */
DBG_GRC_PARAM_PARITY_SAFE,
- DBG_GRC_PARAM_DUMP_CM, /* dump CM memories (0/1) */
- DBG_GRC_PARAM_DUMP_PHY, /* dump PHY memories (0/1) */
+ DBG_GRC_PARAM_DUMP_CM,
+ DBG_GRC_PARAM_DUMP_PHY,
DBG_GRC_PARAM_NO_MCP,
DBG_GRC_PARAM_NO_FW_VER,
MAX_DBG_GRC_PARAMS
@@ -2347,7 +2360,10 @@ enum dbg_status {
DBG_STATUS_REG_FIFO_BAD_DATA,
DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA,
DBG_STATUS_DBG_ARRAY_NOT_SET,
- DBG_STATUS_MULTI_BLOCKS_WITH_FILTER,
+ DBG_STATUS_FILTER_BUG,
+ DBG_STATUS_NON_MATCHING_LINES,
+ DBG_STATUS_INVALID_TRIGGER_DWORD_OFFSET,
+ DBG_STATUS_DBG_BUS_IN_USE,
MAX_DBG_STATUS
};
@@ -2364,25 +2380,22 @@ enum dbg_storms {
/* Idle Check data */
struct idle_chk_data {
- __le32 buf_size; /* Idle check buffer size in dwords */
- u8 buf_size_set; /* Indicates if the idle check buffer size was set
- * (0/1).
- */
+ __le32 buf_size;
+ u8 buf_size_set;
u8 reserved1;
__le16 reserved2;
};
/* Debug Tools data (per HW function) */
struct dbg_tools_data {
- struct dbg_grc_data grc; /* GRC Dump data */
- struct dbg_bus_data bus; /* Debug Bus data */
- struct idle_chk_data idle_chk; /* Idle Check data */
- u8 mode_enable[40]; /* Indicates if a mode is enabled (0/1) */
- u8 block_in_reset[88]; /* Indicates if a block is in reset state (0/1).
- */
- u8 chip_id; /* Chip ID (from enum chip_ids) */
- u8 platform_id; /* Platform ID (from enum platform_ids) */
- u8 initialized; /* Indicates if the data was initialized */
+ struct dbg_grc_data grc;
+ struct dbg_bus_data bus;
+ struct idle_chk_data idle_chk;
+ u8 mode_enable[40];
+ u8 block_in_reset[88];
+ u8 chip_id;
+ u8 platform_id;
+ u8 initialized;
u8 reserved;
};
@@ -2464,6 +2477,12 @@ struct init_qm_vport_params {
/* Max size in dwords of a zipped array */
#define MAX_ZIPPED_SIZE 8192
+enum chip_ids {
+ CHIP_BB,
+ CHIP_K2,
+ CHIP_RESERVED,
+ MAX_CHIP_IDS
+};
struct fw_asserts_ram_section {
__le16 section_ram_line_offset;
@@ -2475,18 +2494,18 @@ struct fw_asserts_ram_section {
};
struct fw_ver_num {
- u8 major; /* Firmware major version number */
- u8 minor; /* Firmware minor version number */
- u8 rev; /* Firmware revision version number */
- u8 eng; /* Firmware engineering version number (for bootleg versions) */
+ u8 major;
+ u8 minor;
+ u8 rev;
+ u8 eng;
};
struct fw_ver_info {
- __le16 tools_ver; /* Tools version number */
- u8 image_id; /* FW image ID (e.g. main) */
+ __le16 tools_ver;
+ u8 image_id;
u8 reserved1;
- struct fw_ver_num num; /* FW version number */
- __le32 timestamp; /* FW Timestamp in unix time (sec. since 1970) */
+ struct fw_ver_num num;
+ __le32 timestamp;
__le32 reserved2;
};
@@ -2722,7 +2741,6 @@ struct init_read_op {
#define INIT_READ_OP_ADDRESS_MASK 0x7FFFFF
#define INIT_READ_OP_ADDRESS_SHIFT 9
__le32 expected_val;
-
};
/* Init operations union */
@@ -2782,6 +2800,7 @@ struct iro {
* @param bin_ptr - a pointer to the binary data with debug arrays.
*/
enum dbg_status qed_dbg_set_bin_ptr(const u8 * const bin_ptr);
+
/**
* @brief qed_dbg_grc_set_params_default - Reverts all GRC parameters to their
* default value.
@@ -2805,6 +2824,7 @@ void qed_dbg_grc_set_params_default(struct qed_hwfn *p_hwfn);
enum dbg_status qed_dbg_grc_get_dump_buf_size(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *buf_size);
+
/**
* @brief qed_dbg_grc_dump - Dumps GRC data into the specified buffer.
*
@@ -2824,6 +2844,7 @@ enum dbg_status qed_dbg_grc_dump(struct qed_hwfn *p_hwfn,
u32 *dump_buf,
u32 buf_size_in_dwords,
u32 *num_dumped_dwords);
+
/**
* @brief qed_dbg_idle_chk_get_dump_buf_size - Returns the required buffer size
* for idle check results.
@@ -2840,6 +2861,7 @@ enum dbg_status qed_dbg_grc_dump(struct qed_hwfn *p_hwfn,
enum dbg_status qed_dbg_idle_chk_get_dump_buf_size(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *buf_size);
+
/**
* @brief qed_dbg_idle_chk_dump - Performs idle check and writes the results
* into the specified buffer.
@@ -2860,6 +2882,7 @@ enum dbg_status qed_dbg_idle_chk_dump(struct qed_hwfn *p_hwfn,
u32 *dump_buf,
u32 buf_size_in_dwords,
u32 *num_dumped_dwords);
+
/**
* @brief qed_dbg_mcp_trace_get_dump_buf_size - Returns the required buffer size
* for mcp trace results.
@@ -2878,6 +2901,7 @@ enum dbg_status qed_dbg_idle_chk_dump(struct qed_hwfn *p_hwfn,
enum dbg_status qed_dbg_mcp_trace_get_dump_buf_size(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *buf_size);
+
/**
* @brief qed_dbg_mcp_trace_dump - Performs mcp trace and writes the results
* into the specified buffer.
@@ -2902,6 +2926,7 @@ enum dbg_status qed_dbg_mcp_trace_dump(struct qed_hwfn *p_hwfn,
u32 *dump_buf,
u32 buf_size_in_dwords,
u32 *num_dumped_dwords);
+
/**
* @brief qed_dbg_reg_fifo_get_dump_buf_size - Returns the required buffer size
* for grc trace fifo results.
@@ -2917,6 +2942,7 @@ enum dbg_status qed_dbg_mcp_trace_dump(struct qed_hwfn *p_hwfn,
enum dbg_status qed_dbg_reg_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *buf_size);
+
/**
* @brief qed_dbg_reg_fifo_dump - Reads the reg fifo and writes the results into
* the specified buffer.
@@ -2938,6 +2964,7 @@ enum dbg_status qed_dbg_reg_fifo_dump(struct qed_hwfn *p_hwfn,
u32 *dump_buf,
u32 buf_size_in_dwords,
u32 *num_dumped_dwords);
+
/**
* @brief qed_dbg_igu_fifo_get_dump_buf_size - Returns the required buffer size
* for the IGU fifo results.
@@ -2954,6 +2981,7 @@ enum dbg_status qed_dbg_reg_fifo_dump(struct qed_hwfn *p_hwfn,
enum dbg_status qed_dbg_igu_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *buf_size);
+
/**
* @brief qed_dbg_igu_fifo_dump - Reads the IGU fifo and writes the results into
* the specified buffer.
@@ -2975,6 +3003,7 @@ enum dbg_status qed_dbg_igu_fifo_dump(struct qed_hwfn *p_hwfn,
u32 *dump_buf,
u32 buf_size_in_dwords,
u32 *num_dumped_dwords);
+
/**
* @brief qed_dbg_protection_override_get_dump_buf_size - Returns the required
* buffer size for protection override window results.
@@ -3048,6 +3077,29 @@ enum dbg_status qed_dbg_fw_asserts_dump(struct qed_hwfn *p_hwfn,
u32 *dump_buf,
u32 buf_size_in_dwords,
u32 *num_dumped_dwords);
+
+/**
+ * @brief qed_dbg_read_attn - Reads the attention registers of the specified
+ * block and type, and writes the results into the specified buffer.
+ *
+ * @param p_hwfn - HW device data
+ * @param p_ptt - Ptt window used for writing the registers.
+ * @param block - Block ID.
+ * @param attn_type - Attention type.
+ * @param clear_status - Indicates if the attention status should be cleared.
+ * @param results - OUT: Pointer to write the read results into
+ *
+ * @return error if one of the following holds:
+ * - the version wasn't set
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_read_attn(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum block_id block,
+ enum dbg_attn_type attn_type,
+ bool clear_status,
+ struct dbg_attn_block_result *results);
+
/**
* @brief qed_dbg_print_attn - Prints attention register values in the
* specified results struct.
@@ -3074,6 +3126,7 @@ enum dbg_status qed_dbg_print_attn(struct qed_hwfn *p_hwfn,
* @param bin_ptr - a pointer to the binary data with debug arrays.
*/
enum dbg_status qed_dbg_user_set_bin_ptr(const u8 * const bin_ptr);
+
/**
* @brief qed_dbg_get_status_str - Returns a string for the specified status.
*
@@ -3082,6 +3135,7 @@ enum dbg_status qed_dbg_user_set_bin_ptr(const u8 * const bin_ptr);
* @return a string for the specified status
*/
const char *qed_dbg_get_status_str(enum dbg_status status);
+
/**
* @brief qed_get_idle_chk_results_buf_size - Returns the required buffer size
* for idle check results (in bytes).
@@ -3116,6 +3170,7 @@ enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn,
char *results_buf,
u32 *num_errors,
u32 *num_warnings);
+
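+/* A minimal sketch of the matching parse step (illustrative; allocation
+ * assumed): each *_results_buf_size/*_print_* pair renders a raw dump as
+ * text, sized in bytes:
+ *
+ *	u32 text_size, errors, warnings;
+ *	char *text;
+ *
+ *	if (qed_get_idle_chk_results_buf_size(p_hwfn, dump_buf,
+ *					      num_dumped_dwords,
+ *					      &text_size) != DBG_STATUS_OK)
+ *		return;
+ *	text = vzalloc(text_size);
+ *	if (text) {
+ *		qed_print_idle_chk_results(p_hwfn, dump_buf, num_dumped_dwords,
+ *					   text, &errors, &warnings);
+ *		vfree(text);
+ *	}
+ */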
/**
* @brief qed_get_mcp_trace_results_buf_size - Returns the required buffer size
* for MCP Trace results (in bytes).
@@ -3132,6 +3187,7 @@ enum dbg_status qed_get_mcp_trace_results_buf_size(struct qed_hwfn *p_hwfn,
u32 *dump_buf,
u32 num_dumped_dwords,
u32 *results_buf_size);
+
/**
* @brief qed_print_mcp_trace_results - Prints MCP Trace results
*
@@ -3146,6 +3202,7 @@ enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn,
u32 *dump_buf,
u32 num_dumped_dwords,
char *results_buf);
+
/**
* @brief qed_get_reg_fifo_results_buf_size - Returns the required buffer size
* for reg_fifo results (in bytes).
@@ -3162,6 +3219,7 @@ enum dbg_status qed_get_reg_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
u32 *dump_buf,
u32 num_dumped_dwords,
u32 *results_buf_size);
+
/**
* @brief qed_print_reg_fifo_results - Prints reg fifo results
*
@@ -3176,6 +3234,7 @@ enum dbg_status qed_print_reg_fifo_results(struct qed_hwfn *p_hwfn,
u32 *dump_buf,
u32 num_dumped_dwords,
char *results_buf);
+
/**
* @brief qed_get_igu_fifo_results_buf_size - Returns the required buffer size
* for igu_fifo results (in bytes).
@@ -3192,6 +3251,7 @@ enum dbg_status qed_get_igu_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
u32 *dump_buf,
u32 num_dumped_dwords,
u32 *results_buf_size);
+
/**
* @brief qed_print_igu_fifo_results - Prints IGU fifo results
*
@@ -3206,6 +3266,7 @@ enum dbg_status qed_print_igu_fifo_results(struct qed_hwfn *p_hwfn,
u32 *dump_buf,
u32 num_dumped_dwords,
char *results_buf);
+
/**
* @brief qed_get_protection_override_results_buf_size - Returns the required
* buffer size for protection override results (in bytes).
@@ -3223,6 +3284,7 @@ qed_get_protection_override_results_buf_size(struct qed_hwfn *p_hwfn,
u32 *dump_buf,
u32 num_dumped_dwords,
u32 *results_buf_size);
+
/**
* @brief qed_print_protection_override_results - Prints protection override
* results.
@@ -3238,6 +3300,7 @@ enum dbg_status qed_print_protection_override_results(struct qed_hwfn *p_hwfn,
u32 *dump_buf,
u32 num_dumped_dwords,
char *results_buf);
+
/**
* @brief qed_get_fw_asserts_results_buf_size - Returns the required buffer size
* for FW Asserts results (in bytes).
@@ -3254,6 +3317,7 @@ enum dbg_status qed_get_fw_asserts_results_buf_size(struct qed_hwfn *p_hwfn,
u32 *dump_buf,
u32 num_dumped_dwords,
u32 *results_buf_size);
+
/**
* @brief qed_print_fw_asserts_results - Prints FW Asserts results
*
@@ -3268,6 +3332,283 @@ enum dbg_status qed_print_fw_asserts_results(struct qed_hwfn *p_hwfn,
u32 *dump_buf,
u32 num_dumped_dwords,
char *results_buf);
+
+/**
+ * @brief qed_dbg_parse_attn - Parses and prints attention register values in
+ * the specified results struct.
+ *
+ * @param p_hwfn - HW device data
+ * @param results - Pointer to the attention read results
+ *
+ * @return error if one of the following holds:
+ * - the version wasn't set
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn,
+ struct dbg_attn_block_result *results);
+
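+/* A minimal sketch of the attention flow added above; the block and
+ * attention type are illustrative:
+ *
+ *	struct dbg_attn_block_result res;
+ *
+ *	if (qed_dbg_read_attn(p_hwfn, p_ptt, BLOCK_IGU, ATTN_TYPE_INTERRUPT,
+ *			      false, &res) == DBG_STATUS_OK)
+ *		qed_dbg_parse_attn(p_hwfn, &res);
+ */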
+/* Debug Bus blocks */
+static const u32 dbg_bus_blocks[] = {
+ 0x0000000f, /* grc, bb, 15 lines */
+ 0x0000000f, /* grc, k2, 15 lines */
+ 0x00000000,
+ 0x00000000, /* miscs, bb, 0 lines */
+ 0x00000000, /* miscs, k2, 0 lines */
+ 0x00000000,
+ 0x00000000, /* misc, bb, 0 lines */
+ 0x00000000, /* misc, k2, 0 lines */
+ 0x00000000,
+ 0x00000000, /* dbu, bb, 0 lines */
+ 0x00000000, /* dbu, k2, 0 lines */
+ 0x00000000,
+ 0x000f0127, /* pglue_b, bb, 39 lines */
+ 0x0036012a, /* pglue_b, k2, 42 lines */
+ 0x00000000,
+ 0x00000000, /* cnig, bb, 0 lines */
+ 0x00120102, /* cnig, k2, 2 lines */
+ 0x00000000,
+ 0x00000000, /* cpmu, bb, 0 lines */
+ 0x00000000, /* cpmu, k2, 0 lines */
+ 0x00000000,
+ 0x00000001, /* ncsi, bb, 1 line */
+ 0x00000001, /* ncsi, k2, 1 line */
+ 0x00000000,
+ 0x00000000, /* opte, bb, 0 lines */
+ 0x00000000, /* opte, k2, 0 lines */
+ 0x00000000,
+ 0x00600085, /* bmb, bb, 133 lines */
+ 0x00600085, /* bmb, k2, 133 lines */
+ 0x00000000,
+ 0x00000000, /* pcie, bb, 0 lines */
+ 0x00e50033, /* pcie, k2, 51 lines */
+ 0x00000000,
+ 0x00000000, /* mcp, bb, 0 lines */
+ 0x00000000, /* mcp, k2, 0 lines */
+ 0x00000000,
+ 0x01180009, /* mcp2, bb, 9 lines */
+ 0x01180009, /* mcp2, k2, 9 lines */
+ 0x00000000,
+ 0x01210104, /* pswhst, bb, 4 lines */
+ 0x01210104, /* pswhst, k2, 4 lines */
+ 0x00000000,
+ 0x01250103, /* pswhst2, bb, 3 lines */
+ 0x01250103, /* pswhst2, k2, 3 lines */
+ 0x00000000,
+ 0x00340101, /* pswrd, bb, 1 line */
+ 0x00340101, /* pswrd, k2, 1 line */
+ 0x00000000,
+ 0x01280119, /* pswrd2, bb, 25 lines */
+ 0x01280119, /* pswrd2, k2, 25 lines */
+ 0x00000000,
+ 0x01410109, /* pswwr, bb, 9 lines */
+ 0x01410109, /* pswwr, k2, 9 lines */
+ 0x00000000,
+ 0x00000000, /* pswwr2, bb, 0 lines */
+ 0x00000000, /* pswwr2, k2, 0 lines */
+ 0x00000000,
+ 0x001c0001, /* pswrq, bb, 1 line */
+ 0x001c0001, /* pswrq, k2, 1 line */
+ 0x00000000,
+ 0x014a0015, /* pswrq2, bb, 21 lines */
+ 0x014a0015, /* pswrq2, k2, 21 lines */
+ 0x00000000,
+ 0x00000000, /* pglcs, bb, 0 lines */
+ 0x00120006, /* pglcs, k2, 6 lines */
+ 0x00000000,
+ 0x00100001, /* dmae, bb, 1 line */
+ 0x00100001, /* dmae, k2, 1 line */
+ 0x00000000,
+ 0x015f0105, /* ptu, bb, 5 lines */
+ 0x015f0105, /* ptu, k2, 5 lines */
+ 0x00000000,
+ 0x01640120, /* tcm, bb, 32 lines */
+ 0x01640120, /* tcm, k2, 32 lines */
+ 0x00000000,
+ 0x01640120, /* mcm, bb, 32 lines */
+ 0x01640120, /* mcm, k2, 32 lines */
+ 0x00000000,
+ 0x01640120, /* ucm, bb, 32 lines */
+ 0x01640120, /* ucm, k2, 32 lines */
+ 0x00000000,
+ 0x01640120, /* xcm, bb, 32 lines */
+ 0x01640120, /* xcm, k2, 32 lines */
+ 0x00000000,
+ 0x01640120, /* ycm, bb, 32 lines */
+ 0x01640120, /* ycm, k2, 32 lines */
+ 0x00000000,
+ 0x01640120, /* pcm, bb, 32 lines */
+ 0x01640120, /* pcm, k2, 32 lines */
+ 0x00000000,
+ 0x01840062, /* qm, bb, 98 lines */
+ 0x01840062, /* qm, k2, 98 lines */
+ 0x00000000,
+ 0x01e60021, /* tm, bb, 33 lines */
+ 0x01e60021, /* tm, k2, 33 lines */
+ 0x00000000,
+ 0x02070107, /* dorq, bb, 7 lines */
+ 0x02070107, /* dorq, k2, 7 lines */
+ 0x00000000,
+ 0x00600185, /* brb, bb, 133 lines */
+ 0x00600185, /* brb, k2, 133 lines */
+ 0x00000000,
+ 0x020e0019, /* src, bb, 25 lines */
+ 0x020c001a, /* src, k2, 26 lines */
+ 0x00000000,
+ 0x02270104, /* prs, bb, 4 lines */
+ 0x02270104, /* prs, k2, 4 lines */
+ 0x00000000,
+ 0x022b0133, /* tsdm, bb, 51 lines */
+ 0x022b0133, /* tsdm, k2, 51 lines */
+ 0x00000000,
+ 0x022b0133, /* msdm, bb, 51 lines */
+ 0x022b0133, /* msdm, k2, 51 lines */
+ 0x00000000,
+ 0x022b0133, /* usdm, bb, 51 lines */
+ 0x022b0133, /* usdm, k2, 51 lines */
+ 0x00000000,
+ 0x022b0133, /* xsdm, bb, 51 lines */
+ 0x022b0133, /* xsdm, k2, 51 lines */
+ 0x00000000,
+ 0x022b0133, /* ysdm, bb, 51 lines */
+ 0x022b0133, /* ysdm, k2, 51 lines */
+ 0x00000000,
+ 0x022b0133, /* psdm, bb, 51 lines */
+ 0x022b0133, /* psdm, k2, 51 lines */
+ 0x00000000,
+ 0x025e010c, /* tsem, bb, 12 lines */
+ 0x025e010c, /* tsem, k2, 12 lines */
+ 0x00000000,
+ 0x025e010c, /* msem, bb, 12 lines */
+ 0x025e010c, /* msem, k2, 12 lines */
+ 0x00000000,
+ 0x025e010c, /* usem, bb, 12 lines */
+ 0x025e010c, /* usem, k2, 12 lines */
+ 0x00000000,
+ 0x025e010c, /* xsem, bb, 12 lines */
+ 0x025e010c, /* xsem, k2, 12 lines */
+ 0x00000000,
+ 0x025e010c, /* ysem, bb, 12 lines */
+ 0x025e010c, /* ysem, k2, 12 lines */
+ 0x00000000,
+ 0x025e010c, /* psem, bb, 12 lines */
+ 0x025e010c, /* psem, k2, 12 lines */
+ 0x00000000,
+ 0x026a000d, /* rss, bb, 13 lines */
+ 0x026a000d, /* rss, k2, 13 lines */
+ 0x00000000,
+ 0x02770106, /* tmld, bb, 6 lines */
+ 0x02770106, /* tmld, k2, 6 lines */
+ 0x00000000,
+ 0x027d0106, /* muld, bb, 6 lines */
+ 0x027d0106, /* muld, k2, 6 lines */
+ 0x00000000,
+ 0x02770005, /* yuld, bb, 5 lines */
+ 0x02770005, /* yuld, k2, 5 lines */
+ 0x00000000,
+ 0x02830107, /* xyld, bb, 7 lines */
+ 0x027d0107, /* xyld, k2, 7 lines */
+ 0x00000000,
+ 0x00000000, /* ptld, bb, 0 lines */
+ 0x00000000, /* ptld, k2, 0 lines */
+ 0x00000000,
+ 0x00000000, /* ypld, bb, 0 lines */
+ 0x00000000, /* ypld, k2, 0 lines */
+ 0x00000000,
+ 0x028a010e, /* prm, bb, 14 lines */
+ 0x02980110, /* prm, k2, 16 lines */
+ 0x00000000,
+ 0x02a8000d, /* pbf_pb1, bb, 13 lines */
+ 0x02a8000d, /* pbf_pb1, k2, 13 lines */
+ 0x00000000,
+ 0x02a8000d, /* pbf_pb2, bb, 13 lines */
+ 0x02a8000d, /* pbf_pb2, k2, 13 lines */
+ 0x00000000,
+ 0x02a8000d, /* rpb, bb, 13 lines */
+ 0x02a8000d, /* rpb, k2, 13 lines */
+ 0x00000000,
+ 0x00600185, /* btb, bb, 133 lines */
+ 0x00600185, /* btb, k2, 133 lines */
+ 0x00000000,
+ 0x02b50117, /* pbf, bb, 23 lines */
+ 0x02b50117, /* pbf, k2, 23 lines */
+ 0x00000000,
+ 0x02cc0006, /* rdif, bb, 6 lines */
+ 0x02cc0006, /* rdif, k2, 6 lines */
+ 0x00000000,
+ 0x02d20006, /* tdif, bb, 6 lines */
+ 0x02d20006, /* tdif, k2, 6 lines */
+ 0x00000000,
+ 0x02d80003, /* cdu, bb, 3 lines */
+ 0x02db000e, /* cdu, k2, 14 lines */
+ 0x00000000,
+ 0x02e9010d, /* ccfc, bb, 13 lines */
+ 0x02f60117, /* ccfc, k2, 23 lines */
+ 0x00000000,
+ 0x02e9010d, /* tcfc, bb, 13 lines */
+ 0x02f60117, /* tcfc, k2, 23 lines */
+ 0x00000000,
+ 0x030d0133, /* igu, bb, 51 lines */
+ 0x030d0133, /* igu, k2, 51 lines */
+ 0x00000000,
+ 0x03400106, /* cau, bb, 6 lines */
+ 0x03400106, /* cau, k2, 6 lines */
+ 0x00000000,
+ 0x00000000, /* rgfs, bb, 0 lines */
+ 0x00000000, /* rgfs, k2, 0 lines */
+ 0x00000000,
+ 0x00000000, /* rgsrc, bb, 0 lines */
+ 0x00000000, /* rgsrc, k2, 0 lines */
+ 0x00000000,
+ 0x00000000, /* tgfs, bb, 0 lines */
+ 0x00000000, /* tgfs, k2, 0 lines */
+ 0x00000000,
+ 0x00000000, /* tgsrc, bb, 0 lines */
+ 0x00000000, /* tgsrc, k2, 0 lines */
+ 0x00000000,
+ 0x00000000, /* umac, bb, 0 lines */
+ 0x00120006, /* umac, k2, 6 lines */
+ 0x00000000,
+ 0x00000000, /* xmac, bb, 0 lines */
+ 0x00000000, /* xmac, k2, 0 lines */
+ 0x00000000,
+ 0x00000000, /* dbg, bb, 0 lines */
+ 0x00000000, /* dbg, k2, 0 lines */
+ 0x00000000,
+ 0x0346012b, /* nig, bb, 43 lines */
+ 0x0346011d, /* nig, k2, 29 lines */
+ 0x00000000,
+ 0x00000000, /* wol, bb, 0 lines */
+ 0x001c0002, /* wol, k2, 2 lines */
+ 0x00000000,
+ 0x00000000, /* bmbn, bb, 0 lines */
+ 0x00210008, /* bmbn, k2, 8 lines */
+ 0x00000000,
+ 0x00000000, /* ipc, bb, 0 lines */
+ 0x00000000, /* ipc, k2, 0 lines */
+ 0x00000000,
+ 0x00000000, /* nwm, bb, 0 lines */
+ 0x0371000b, /* nwm, k2, 11 lines */
+ 0x00000000,
+ 0x00000000, /* nws, bb, 0 lines */
+ 0x037c0009, /* nws, k2, 9 lines */
+ 0x00000000,
+ 0x00000000, /* ms, bb, 0 lines */
+ 0x00120004, /* ms, k2, 4 lines */
+ 0x00000000,
+ 0x00000000, /* phy_pcie, bb, 0 lines */
+ 0x00e5001a, /* phy_pcie, k2, 26 lines */
+ 0x00000000,
+ 0x00000000, /* led, bb, 0 lines */
+ 0x00000000, /* led, k2, 0 lines */
+ 0x00000000,
+ 0x00000000, /* avs_wrap, bb, 0 lines */
+ 0x00000000, /* avs_wrap, k2, 0 lines */
+ 0x00000000,
+ 0x00000000, /* bar0_map, bb, 0 lines */
+ 0x00000000, /* bar0_map, k2, 0 lines */
+ 0x00000000,
+};
+
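+/* Layout note, inferred from the comments above (treat as an assumption):
+ * entries come in (bb, k2, reserved) triplets per block, and the low byte of
+ * each dword matches the commented debug-line count:
+ *
+ *	u32 desc = dbg_bus_blocks[block_id * 3 + chip_id];
+ *	u8 num_lines = desc & 0xff;
+ */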
/* Win 2 */
#define GTT_BAR0_MAP_REG_IGU_CMD 0x00f000UL
@@ -3589,37 +3930,37 @@ void qed_set_rfs_mode_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
#define PSTORM_FCOE_TX_STATS_OFFSET(pf_id) \
(IRO[44].base + ((pf_id) * IRO[44].m1))
-static const struct iro iro_arr[47] = {
+static const struct iro iro_arr[49] = {
{0x0, 0x0, 0x0, 0x0, 0x8},
{0x4cb0, 0x80, 0x0, 0x0, 0x80},
- {0x6318, 0x20, 0x0, 0x0, 0x20},
+ {0x6518, 0x20, 0x0, 0x0, 0x20},
{0xb00, 0x8, 0x0, 0x0, 0x4},
{0xa80, 0x8, 0x0, 0x0, 0x4},
{0x0, 0x8, 0x0, 0x0, 0x2},
{0x80, 0x8, 0x0, 0x0, 0x4},
{0x84, 0x8, 0x0, 0x0, 0x2},
- {0x4bc0, 0x0, 0x0, 0x0, 0x78},
+ {0x4c40, 0x0, 0x0, 0x0, 0x78},
{0x3df0, 0x0, 0x0, 0x0, 0x78},
{0x29b0, 0x0, 0x0, 0x0, 0x78},
{0x4c38, 0x0, 0x0, 0x0, 0x78},
{0x4990, 0x0, 0x0, 0x0, 0x78},
- {0x7e48, 0x0, 0x0, 0x0, 0x78},
+ {0x7f48, 0x0, 0x0, 0x0, 0x78},
{0xa28, 0x8, 0x0, 0x0, 0x8},
- {0x60f8, 0x10, 0x0, 0x0, 0x10},
- {0xb820, 0x30, 0x0, 0x0, 0x30},
+ {0x61f8, 0x10, 0x0, 0x0, 0x10},
+ {0xbd20, 0x30, 0x0, 0x0, 0x30},
{0x95b8, 0x30, 0x0, 0x0, 0x30},
{0x4b60, 0x80, 0x0, 0x0, 0x40},
{0x1f8, 0x4, 0x0, 0x0, 0x4},
{0x53a0, 0x80, 0x4, 0x0, 0x4},
- {0xc8f0, 0x0, 0x0, 0x0, 0x4},
+ {0xc7c8, 0x0, 0x0, 0x0, 0x4},
{0x4ba0, 0x80, 0x0, 0x0, 0x20},
- {0x8050, 0x40, 0x0, 0x0, 0x30},
- {0xe770, 0x60, 0x0, 0x0, 0x60},
+ {0x8150, 0x40, 0x0, 0x0, 0x30},
+ {0xec70, 0x60, 0x0, 0x0, 0x60},
{0x2b48, 0x80, 0x0, 0x0, 0x38},
- {0xf188, 0x78, 0x0, 0x0, 0x78},
+ {0xf1b0, 0x78, 0x0, 0x0, 0x78},
{0x1f8, 0x4, 0x0, 0x0, 0x4},
- {0xacf0, 0x0, 0x0, 0x0, 0xf0},
- {0xade0, 0x8, 0x0, 0x0, 0x8},
+ {0xaef8, 0x0, 0x0, 0x0, 0xf0},
+ {0xafe8, 0x8, 0x0, 0x0, 0x8},
{0x1f8, 0x8, 0x0, 0x0, 0x8},
{0xac0, 0x8, 0x0, 0x0, 0x8},
{0x2578, 0x8, 0x0, 0x0, 0x8},
@@ -3627,16 +3968,18 @@ static const struct iro iro_arr[47] = {
{0x0, 0x8, 0x0, 0x0, 0x8},
{0x200, 0x10, 0x8, 0x0, 0x8},
{0xb78, 0x10, 0x8, 0x0, 0x2},
- {0xd888, 0x38, 0x0, 0x0, 0x24},
- {0x12c38, 0x10, 0x0, 0x0, 0x8},
- {0x11aa0, 0x38, 0x0, 0x0, 0x18},
- {0xa8c0, 0x38, 0x0, 0x0, 0x10},
+ {0xd9a8, 0x38, 0x0, 0x0, 0x24},
+ {0x12988, 0x10, 0x0, 0x0, 0x8},
+ {0x11fa0, 0x38, 0x0, 0x0, 0x18},
+ {0xa580, 0x38, 0x0, 0x0, 0x10},
{0x86f8, 0x30, 0x0, 0x0, 0x18},
{0x101f8, 0x10, 0x0, 0x0, 0x10},
- {0xdd08, 0x48, 0x0, 0x0, 0x38},
+ {0xde28, 0x48, 0x0, 0x0, 0x38},
{0x10660, 0x20, 0x0, 0x0, 0x20},
{0x2b80, 0x80, 0x0, 0x0, 0x10},
{0x5020, 0x10, 0x0, 0x0, 0x10},
+ {0xc9b0, 0x30, 0x0, 0x0, 0x10},
+ {0xeec0, 0x10, 0x0, 0x0, 0x10},
};
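+/* Each iro_arr entry feeds the parameterized *_OFFSET macros, as in
+ * PSTORM_FCOE_TX_STATS_OFFSET() above: the first value is the base and the
+ * second the per-ID stride. E.g. with the updated entry
+ * {0x6518, 0x20, 0x0, 0x0, 0x20}, an offset expands to (illustrative):
+ *
+ *	offset = 0x6518 + (id * 0x20);
+ */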
/* Runtime array offsets */
@@ -3724,361 +4067,359 @@ static const struct iro iro_arr[47] = {
#define PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET 6697
#define PSWRQ2_REG_VF_BASE_RT_OFFSET 6698
#define PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET 6699
-#define PSWRQ2_REG_WR_MBS0_RT_OFFSET 6700
-#define PSWRQ2_REG_RD_MBS0_RT_OFFSET 6701
-#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET 6702
-#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET 6703
-#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET 6704
+#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET 6700
+#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET 6701
+#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET 6702
#define PSWRQ2_REG_ILT_MEMORY_RT_SIZE 22000
-#define PGLUE_REG_B_VF_BASE_RT_OFFSET 28704
-#define PGLUE_REG_B_MSDM_OFFSET_MASK_B_RT_OFFSET 28705
-#define PGLUE_REG_B_MSDM_VF_SHIFT_B_RT_OFFSET 28706
-#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET 28707
-#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET 28708
-#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET 28709
-#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET 28710
-#define TM_REG_VF_ENABLE_CONN_RT_OFFSET 28711
-#define TM_REG_PF_ENABLE_CONN_RT_OFFSET 28712
-#define TM_REG_PF_ENABLE_TASK_RT_OFFSET 28713
-#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET 28714
-#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET 28715
-#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET 28716
+#define PGLUE_REG_B_VF_BASE_RT_OFFSET 28702
+#define PGLUE_REG_B_MSDM_OFFSET_MASK_B_RT_OFFSET 28703
+#define PGLUE_REG_B_MSDM_VF_SHIFT_B_RT_OFFSET 28704
+#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET 28705
+#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET 28706
+#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET 28707
+#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET 28708
+#define TM_REG_VF_ENABLE_CONN_RT_OFFSET 28709
+#define TM_REG_PF_ENABLE_CONN_RT_OFFSET 28710
+#define TM_REG_PF_ENABLE_TASK_RT_OFFSET 28711
+#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET 28712
+#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET 28713
+#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET 28714
#define TM_REG_CONFIG_CONN_MEM_RT_SIZE 416
-#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET 29132
-#define TM_REG_CONFIG_TASK_MEM_RT_SIZE 512
-#define QM_REG_MAXPQSIZE_0_RT_OFFSET 29644
-#define QM_REG_MAXPQSIZE_1_RT_OFFSET 29645
-#define QM_REG_MAXPQSIZE_2_RT_OFFSET 29646
-#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET 29647
-#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET 29648
-#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET 29649
-#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET 29650
-#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET 29651
-#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET 29652
-#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET 29653
-#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET 29654
-#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET 29655
-#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET 29656
-#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET 29657
-#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET 29658
-#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET 29659
-#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET 29660
-#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET 29661
-#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET 29662
-#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET 29663
-#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET 29664
-#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET 29665
-#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET 29666
-#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET 29667
-#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET 29668
-#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET 29669
-#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET 29670
-#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET 29671
-#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET 29672
-#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET 29673
-#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET 29674
-#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET 29675
-#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET 29676
-#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET 29677
-#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET 29678
-#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET 29679
-#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET 29680
-#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET 29681
-#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET 29682
-#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET 29683
-#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET 29684
-#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET 29685
-#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET 29686
-#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET 29687
-#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET 29688
-#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET 29689
-#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET 29690
-#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET 29691
-#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET 29692
-#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET 29693
-#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET 29694
-#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET 29695
-#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET 29696
-#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET 29697
-#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET 29698
-#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET 29699
-#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET 29700
-#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET 29701
-#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET 29702
-#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET 29703
-#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET 29704
-#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET 29705
-#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET 29706
-#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET 29707
-#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET 29708
-#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET 29709
-#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET 29710
-#define QM_REG_BASEADDROTHERPQ_RT_OFFSET 29711
+#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET 29130
+#define TM_REG_CONFIG_TASK_MEM_RT_SIZE 608
+#define QM_REG_MAXPQSIZE_0_RT_OFFSET 29738
+#define QM_REG_MAXPQSIZE_1_RT_OFFSET 29739
+#define QM_REG_MAXPQSIZE_2_RT_OFFSET 29740
+#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET 29741
+#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET 29742
+#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET 29743
+#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET 29744
+#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET 29745
+#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET 29746
+#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET 29747
+#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET 29748
+#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET 29749
+#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET 29750
+#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET 29751
+#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET 29752
+#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET 29753
+#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET 29754
+#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET 29755
+#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET 29756
+#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET 29757
+#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET 29758
+#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET 29759
+#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET 29760
+#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET 29761
+#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET 29762
+#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET 29763
+#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET 29764
+#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET 29765
+#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET 29766
+#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET 29767
+#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET 29768
+#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET 29769
+#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET 29770
+#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET 29771
+#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET 29772
+#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET 29773
+#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET 29774
+#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET 29775
+#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET 29776
+#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET 29777
+#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET 29778
+#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET 29779
+#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET 29780
+#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET 29781
+#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET 29782
+#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET 29783
+#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET 29784
+#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET 29785
+#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET 29786
+#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET 29787
+#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET 29788
+#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET 29789
+#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET 29790
+#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET 29791
+#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET 29792
+#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET 29793
+#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET 29794
+#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET 29795
+#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET 29796
+#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET 29797
+#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET 29798
+#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET 29799
+#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET 29800
+#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET 29801
+#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET 29802
+#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET 29803
+#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET 29804
+#define QM_REG_BASEADDROTHERPQ_RT_OFFSET 29805
#define QM_REG_BASEADDROTHERPQ_RT_SIZE 128
-#define QM_REG_VOQCRDLINE_RT_OFFSET 29839
-#define QM_REG_VOQCRDLINE_RT_SIZE 20
-#define QM_REG_VOQINITCRDLINE_RT_OFFSET 29859
-#define QM_REG_VOQINITCRDLINE_RT_SIZE 20
-#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 29879
-#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 29880
-#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 29881
-#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 29882
-#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 29883
-#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 29884
-#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 29885
-#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 29886
-#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 29887
-#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 29888
-#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 29889
-#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 29890
-#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 29891
-#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 29892
-#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 29893
-#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 29894
-#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 29895
-#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 29896
-#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 29897
-#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 29898
-#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 29899
-#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 29900
-#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 29901
-#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 29902
-#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 29903
-#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 29904
-#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 29905
-#define QM_REG_PQTX2PF_0_RT_OFFSET 29906
-#define QM_REG_PQTX2PF_1_RT_OFFSET 29907
-#define QM_REG_PQTX2PF_2_RT_OFFSET 29908
-#define QM_REG_PQTX2PF_3_RT_OFFSET 29909
-#define QM_REG_PQTX2PF_4_RT_OFFSET 29910
-#define QM_REG_PQTX2PF_5_RT_OFFSET 29911
-#define QM_REG_PQTX2PF_6_RT_OFFSET 29912
-#define QM_REG_PQTX2PF_7_RT_OFFSET 29913
-#define QM_REG_PQTX2PF_8_RT_OFFSET 29914
-#define QM_REG_PQTX2PF_9_RT_OFFSET 29915
-#define QM_REG_PQTX2PF_10_RT_OFFSET 29916
-#define QM_REG_PQTX2PF_11_RT_OFFSET 29917
-#define QM_REG_PQTX2PF_12_RT_OFFSET 29918
-#define QM_REG_PQTX2PF_13_RT_OFFSET 29919
-#define QM_REG_PQTX2PF_14_RT_OFFSET 29920
-#define QM_REG_PQTX2PF_15_RT_OFFSET 29921
-#define QM_REG_PQTX2PF_16_RT_OFFSET 29922
-#define QM_REG_PQTX2PF_17_RT_OFFSET 29923
-#define QM_REG_PQTX2PF_18_RT_OFFSET 29924
-#define QM_REG_PQTX2PF_19_RT_OFFSET 29925
-#define QM_REG_PQTX2PF_20_RT_OFFSET 29926
-#define QM_REG_PQTX2PF_21_RT_OFFSET 29927
-#define QM_REG_PQTX2PF_22_RT_OFFSET 29928
-#define QM_REG_PQTX2PF_23_RT_OFFSET 29929
-#define QM_REG_PQTX2PF_24_RT_OFFSET 29930
-#define QM_REG_PQTX2PF_25_RT_OFFSET 29931
-#define QM_REG_PQTX2PF_26_RT_OFFSET 29932
-#define QM_REG_PQTX2PF_27_RT_OFFSET 29933
-#define QM_REG_PQTX2PF_28_RT_OFFSET 29934
-#define QM_REG_PQTX2PF_29_RT_OFFSET 29935
-#define QM_REG_PQTX2PF_30_RT_OFFSET 29936
-#define QM_REG_PQTX2PF_31_RT_OFFSET 29937
-#define QM_REG_PQTX2PF_32_RT_OFFSET 29938
-#define QM_REG_PQTX2PF_33_RT_OFFSET 29939
-#define QM_REG_PQTX2PF_34_RT_OFFSET 29940
-#define QM_REG_PQTX2PF_35_RT_OFFSET 29941
-#define QM_REG_PQTX2PF_36_RT_OFFSET 29942
-#define QM_REG_PQTX2PF_37_RT_OFFSET 29943
-#define QM_REG_PQTX2PF_38_RT_OFFSET 29944
-#define QM_REG_PQTX2PF_39_RT_OFFSET 29945
-#define QM_REG_PQTX2PF_40_RT_OFFSET 29946
-#define QM_REG_PQTX2PF_41_RT_OFFSET 29947
-#define QM_REG_PQTX2PF_42_RT_OFFSET 29948
-#define QM_REG_PQTX2PF_43_RT_OFFSET 29949
-#define QM_REG_PQTX2PF_44_RT_OFFSET 29950
-#define QM_REG_PQTX2PF_45_RT_OFFSET 29951
-#define QM_REG_PQTX2PF_46_RT_OFFSET 29952
-#define QM_REG_PQTX2PF_47_RT_OFFSET 29953
-#define QM_REG_PQTX2PF_48_RT_OFFSET 29954
-#define QM_REG_PQTX2PF_49_RT_OFFSET 29955
-#define QM_REG_PQTX2PF_50_RT_OFFSET 29956
-#define QM_REG_PQTX2PF_51_RT_OFFSET 29957
-#define QM_REG_PQTX2PF_52_RT_OFFSET 29958
-#define QM_REG_PQTX2PF_53_RT_OFFSET 29959
-#define QM_REG_PQTX2PF_54_RT_OFFSET 29960
-#define QM_REG_PQTX2PF_55_RT_OFFSET 29961
-#define QM_REG_PQTX2PF_56_RT_OFFSET 29962
-#define QM_REG_PQTX2PF_57_RT_OFFSET 29963
-#define QM_REG_PQTX2PF_58_RT_OFFSET 29964
-#define QM_REG_PQTX2PF_59_RT_OFFSET 29965
-#define QM_REG_PQTX2PF_60_RT_OFFSET 29966
-#define QM_REG_PQTX2PF_61_RT_OFFSET 29967
-#define QM_REG_PQTX2PF_62_RT_OFFSET 29968
-#define QM_REG_PQTX2PF_63_RT_OFFSET 29969
-#define QM_REG_PQOTHER2PF_0_RT_OFFSET 29970
-#define QM_REG_PQOTHER2PF_1_RT_OFFSET 29971
-#define QM_REG_PQOTHER2PF_2_RT_OFFSET 29972
-#define QM_REG_PQOTHER2PF_3_RT_OFFSET 29973
-#define QM_REG_PQOTHER2PF_4_RT_OFFSET 29974
-#define QM_REG_PQOTHER2PF_5_RT_OFFSET 29975
-#define QM_REG_PQOTHER2PF_6_RT_OFFSET 29976
-#define QM_REG_PQOTHER2PF_7_RT_OFFSET 29977
-#define QM_REG_PQOTHER2PF_8_RT_OFFSET 29978
-#define QM_REG_PQOTHER2PF_9_RT_OFFSET 29979
-#define QM_REG_PQOTHER2PF_10_RT_OFFSET 29980
-#define QM_REG_PQOTHER2PF_11_RT_OFFSET 29981
-#define QM_REG_PQOTHER2PF_12_RT_OFFSET 29982
-#define QM_REG_PQOTHER2PF_13_RT_OFFSET 29983
-#define QM_REG_PQOTHER2PF_14_RT_OFFSET 29984
-#define QM_REG_PQOTHER2PF_15_RT_OFFSET 29985
-#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 29986
-#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 29987
-#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 29988
-#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 29989
-#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 29990
-#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 29991
-#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 29992
-#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 29993
-#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 29994
-#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 29995
-#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 29996
-#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 29997
-#define QM_REG_RLGLBLINCVAL_RT_OFFSET 29998
+#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 29933
+#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 29934
+#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 29935
+#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 29936
+#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 29937
+#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 29938
+#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 29939
+#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 29940
+#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 29941
+#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 29942
+#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 29943
+#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 29944
+#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 29945
+#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 29946
+#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 29947
+#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 29948
+#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 29949
+#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 29950
+#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 29951
+#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 29952
+#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 29953
+#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 29954
+#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 29955
+#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 29956
+#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 29957
+#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 29958
+#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 29959
+#define QM_REG_PQTX2PF_0_RT_OFFSET 29960
+#define QM_REG_PQTX2PF_1_RT_OFFSET 29961
+#define QM_REG_PQTX2PF_2_RT_OFFSET 29962
+#define QM_REG_PQTX2PF_3_RT_OFFSET 29963
+#define QM_REG_PQTX2PF_4_RT_OFFSET 29964
+#define QM_REG_PQTX2PF_5_RT_OFFSET 29965
+#define QM_REG_PQTX2PF_6_RT_OFFSET 29966
+#define QM_REG_PQTX2PF_7_RT_OFFSET 29967
+#define QM_REG_PQTX2PF_8_RT_OFFSET 29968
+#define QM_REG_PQTX2PF_9_RT_OFFSET 29969
+#define QM_REG_PQTX2PF_10_RT_OFFSET 29970
+#define QM_REG_PQTX2PF_11_RT_OFFSET 29971
+#define QM_REG_PQTX2PF_12_RT_OFFSET 29972
+#define QM_REG_PQTX2PF_13_RT_OFFSET 29973
+#define QM_REG_PQTX2PF_14_RT_OFFSET 29974
+#define QM_REG_PQTX2PF_15_RT_OFFSET 29975
+#define QM_REG_PQTX2PF_16_RT_OFFSET 29976
+#define QM_REG_PQTX2PF_17_RT_OFFSET 29977
+#define QM_REG_PQTX2PF_18_RT_OFFSET 29978
+#define QM_REG_PQTX2PF_19_RT_OFFSET 29979
+#define QM_REG_PQTX2PF_20_RT_OFFSET 29980
+#define QM_REG_PQTX2PF_21_RT_OFFSET 29981
+#define QM_REG_PQTX2PF_22_RT_OFFSET 29982
+#define QM_REG_PQTX2PF_23_RT_OFFSET 29983
+#define QM_REG_PQTX2PF_24_RT_OFFSET 29984
+#define QM_REG_PQTX2PF_25_RT_OFFSET 29985
+#define QM_REG_PQTX2PF_26_RT_OFFSET 29986
+#define QM_REG_PQTX2PF_27_RT_OFFSET 29987
+#define QM_REG_PQTX2PF_28_RT_OFFSET 29988
+#define QM_REG_PQTX2PF_29_RT_OFFSET 29989
+#define QM_REG_PQTX2PF_30_RT_OFFSET 29990
+#define QM_REG_PQTX2PF_31_RT_OFFSET 29991
+#define QM_REG_PQTX2PF_32_RT_OFFSET 29992
+#define QM_REG_PQTX2PF_33_RT_OFFSET 29993
+#define QM_REG_PQTX2PF_34_RT_OFFSET 29994
+#define QM_REG_PQTX2PF_35_RT_OFFSET 29995
+#define QM_REG_PQTX2PF_36_RT_OFFSET 29996
+#define QM_REG_PQTX2PF_37_RT_OFFSET 29997
+#define QM_REG_PQTX2PF_38_RT_OFFSET 29998
+#define QM_REG_PQTX2PF_39_RT_OFFSET 29999
+#define QM_REG_PQTX2PF_40_RT_OFFSET 30000
+#define QM_REG_PQTX2PF_41_RT_OFFSET 30001
+#define QM_REG_PQTX2PF_42_RT_OFFSET 30002
+#define QM_REG_PQTX2PF_43_RT_OFFSET 30003
+#define QM_REG_PQTX2PF_44_RT_OFFSET 30004
+#define QM_REG_PQTX2PF_45_RT_OFFSET 30005
+#define QM_REG_PQTX2PF_46_RT_OFFSET 30006
+#define QM_REG_PQTX2PF_47_RT_OFFSET 30007
+#define QM_REG_PQTX2PF_48_RT_OFFSET 30008
+#define QM_REG_PQTX2PF_49_RT_OFFSET 30009
+#define QM_REG_PQTX2PF_50_RT_OFFSET 30010
+#define QM_REG_PQTX2PF_51_RT_OFFSET 30011
+#define QM_REG_PQTX2PF_52_RT_OFFSET 30012
+#define QM_REG_PQTX2PF_53_RT_OFFSET 30013
+#define QM_REG_PQTX2PF_54_RT_OFFSET 30014
+#define QM_REG_PQTX2PF_55_RT_OFFSET 30015
+#define QM_REG_PQTX2PF_56_RT_OFFSET 30016
+#define QM_REG_PQTX2PF_57_RT_OFFSET 30017
+#define QM_REG_PQTX2PF_58_RT_OFFSET 30018
+#define QM_REG_PQTX2PF_59_RT_OFFSET 30019
+#define QM_REG_PQTX2PF_60_RT_OFFSET 30020
+#define QM_REG_PQTX2PF_61_RT_OFFSET 30021
+#define QM_REG_PQTX2PF_62_RT_OFFSET 30022
+#define QM_REG_PQTX2PF_63_RT_OFFSET 30023
+#define QM_REG_PQOTHER2PF_0_RT_OFFSET 30024
+#define QM_REG_PQOTHER2PF_1_RT_OFFSET 30025
+#define QM_REG_PQOTHER2PF_2_RT_OFFSET 30026
+#define QM_REG_PQOTHER2PF_3_RT_OFFSET 30027
+#define QM_REG_PQOTHER2PF_4_RT_OFFSET 30028
+#define QM_REG_PQOTHER2PF_5_RT_OFFSET 30029
+#define QM_REG_PQOTHER2PF_6_RT_OFFSET 30030
+#define QM_REG_PQOTHER2PF_7_RT_OFFSET 30031
+#define QM_REG_PQOTHER2PF_8_RT_OFFSET 30032
+#define QM_REG_PQOTHER2PF_9_RT_OFFSET 30033
+#define QM_REG_PQOTHER2PF_10_RT_OFFSET 30034
+#define QM_REG_PQOTHER2PF_11_RT_OFFSET 30035
+#define QM_REG_PQOTHER2PF_12_RT_OFFSET 30036
+#define QM_REG_PQOTHER2PF_13_RT_OFFSET 30037
+#define QM_REG_PQOTHER2PF_14_RT_OFFSET 30038
+#define QM_REG_PQOTHER2PF_15_RT_OFFSET 30039
+#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 30040
+#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 30041
+#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 30042
+#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 30043
+#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 30044
+#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 30045
+#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 30046
+#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 30047
+#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 30048
+#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 30049
+#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 30050
+#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 30051
+#define QM_REG_RLGLBLINCVAL_RT_OFFSET 30052
#define QM_REG_RLGLBLINCVAL_RT_SIZE 256
-#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 30254
+#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 30308
#define QM_REG_RLGLBLUPPERBOUND_RT_SIZE 256
-#define QM_REG_RLGLBLCRD_RT_OFFSET 30510
+#define QM_REG_RLGLBLCRD_RT_OFFSET 30564
#define QM_REG_RLGLBLCRD_RT_SIZE 256
-#define QM_REG_RLGLBLENABLE_RT_OFFSET 30766
-#define QM_REG_RLPFPERIOD_RT_OFFSET 30767
-#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 30768
-#define QM_REG_RLPFINCVAL_RT_OFFSET 30769
+#define QM_REG_RLGLBLENABLE_RT_OFFSET 30820
+#define QM_REG_RLPFPERIOD_RT_OFFSET 30821
+#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 30822
+#define QM_REG_RLPFINCVAL_RT_OFFSET 30823
#define QM_REG_RLPFINCVAL_RT_SIZE 16
-#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 30785
+#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 30839
#define QM_REG_RLPFUPPERBOUND_RT_SIZE 16
-#define QM_REG_RLPFCRD_RT_OFFSET 30801
+#define QM_REG_RLPFCRD_RT_OFFSET 30855
#define QM_REG_RLPFCRD_RT_SIZE 16
-#define QM_REG_RLPFENABLE_RT_OFFSET 30817
-#define QM_REG_RLPFVOQENABLE_RT_OFFSET 30818
-#define QM_REG_WFQPFWEIGHT_RT_OFFSET 30819
+#define QM_REG_RLPFENABLE_RT_OFFSET 30871
+#define QM_REG_RLPFVOQENABLE_RT_OFFSET 30872
+#define QM_REG_WFQPFWEIGHT_RT_OFFSET 30873
#define QM_REG_WFQPFWEIGHT_RT_SIZE 16
-#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 30835
+#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 30889
#define QM_REG_WFQPFUPPERBOUND_RT_SIZE 16
-#define QM_REG_WFQPFCRD_RT_OFFSET 30851
-#define QM_REG_WFQPFCRD_RT_SIZE 160
-#define QM_REG_WFQPFENABLE_RT_OFFSET 31011
-#define QM_REG_WFQVPENABLE_RT_OFFSET 31012
-#define QM_REG_BASEADDRTXPQ_RT_OFFSET 31013
+#define QM_REG_WFQPFCRD_RT_OFFSET 30905
+#define QM_REG_WFQPFCRD_RT_SIZE 256
+#define QM_REG_WFQPFENABLE_RT_OFFSET 31161
+#define QM_REG_WFQVPENABLE_RT_OFFSET 31162
+#define QM_REG_BASEADDRTXPQ_RT_OFFSET 31163
#define QM_REG_BASEADDRTXPQ_RT_SIZE 512
-#define QM_REG_TXPQMAP_RT_OFFSET 31525
+#define QM_REG_TXPQMAP_RT_OFFSET 31675
#define QM_REG_TXPQMAP_RT_SIZE 512
-#define QM_REG_WFQVPWEIGHT_RT_OFFSET 32037
+#define QM_REG_WFQVPWEIGHT_RT_OFFSET 32187
#define QM_REG_WFQVPWEIGHT_RT_SIZE 512
-#define QM_REG_WFQVPCRD_RT_OFFSET 32549
+#define QM_REG_WFQVPCRD_RT_OFFSET 32699
#define QM_REG_WFQVPCRD_RT_SIZE 512
-#define QM_REG_WFQVPMAP_RT_OFFSET 33061
+#define QM_REG_WFQVPMAP_RT_OFFSET 33211
#define QM_REG_WFQVPMAP_RT_SIZE 512
-#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 33573
-#define QM_REG_WFQPFCRD_MSB_RT_SIZE 160
-#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 33733
-#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 33734
-#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 33735
-#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 33736
-#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 33737
-#define NIG_REG_OUTER_TAG_VALUE_MASK_RT_OFFSET 33738
-#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 33739
-#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 33740
+#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 33723
+#define QM_REG_WFQPFCRD_MSB_RT_SIZE 320
+#define QM_REG_VOQCRDLINE_RT_OFFSET 34043
+#define QM_REG_VOQCRDLINE_RT_SIZE 36
+#define QM_REG_VOQINITCRDLINE_RT_OFFSET 34079
+#define QM_REG_VOQINITCRDLINE_RT_SIZE 36
+#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 34115
+#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 34116
+#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 34117
+#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 34118
+#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 34119
+#define NIG_REG_OUTER_TAG_VALUE_MASK_RT_OFFSET 34120
+#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 34121
+#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 34122
#define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE 4
-#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_OFFSET 33744
+#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_OFFSET 34126
#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_SIZE 4
-#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 33748
+#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 34130
#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE 4
-#define NIG_REG_LLH_FUNC_NO_TAG_RT_OFFSET 33752
-#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 33753
+#define NIG_REG_LLH_FUNC_NO_TAG_RT_OFFSET 34134
+#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 34135
#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE 32
-#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 33785
+#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 34167
#define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 33801
+#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 34183
#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 33817
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 34199
#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 33833
+#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 34215
#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE 16
-#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 33849
-#define NIG_REG_ROCE_DUPLICATE_TO_HOST_RT_OFFSET 33850
-#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 33851
-#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 33852
-#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 33853
-#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 33854
-#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 33855
-#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 33856
-#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 33857
-#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 33858
-#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 33859
-#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 33860
-#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 33861
-#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 33862
-#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 33863
-#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 33864
-#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 33865
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 33866
-#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 33867
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 33868
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 33869
-#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 33870
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 33871
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 33872
-#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 33873
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 33874
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 33875
-#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 33876
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 33877
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 33878
-#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 33879
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 33880
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 33881
-#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 33882
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 33883
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 33884
-#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 33885
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 33886
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 33887
-#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 33888
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 33889
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 33890
-#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 33891
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 33892
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 33893
-#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 33894
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 33895
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 33896
-#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 33897
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 33898
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 33899
-#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 33900
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 33901
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 33902
-#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 33903
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 33904
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 33905
-#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 33906
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 33907
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 33908
-#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 33909
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 33910
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 33911
-#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 33912
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 33913
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 33914
-#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 33915
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 33916
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 33917
-#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 33918
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 33919
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 33920
-#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 33921
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 33922
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 33923
-#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 33924
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 33925
-#define XCM_REG_CON_PHY_Q3_RT_OFFSET 33926
-
-#define RUNTIME_ARRAY_SIZE 33927
+#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 34231
+#define NIG_REG_ROCE_DUPLICATE_TO_HOST_RT_OFFSET 34232
+#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 34233
+#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 34234
+#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 34235
+#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 34236
+#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 34237
+#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 34238
+#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 34239
+#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 34240
+#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 34241
+#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 34242
+#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 34243
+#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 34244
+#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 34245
+#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 34246
+#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 34247
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 34248
+#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 34249
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 34250
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 34251
+#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 34252
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 34253
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 34254
+#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 34255
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 34256
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 34257
+#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 34258
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 34259
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 34260
+#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 34261
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 34262
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 34263
+#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 34264
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 34265
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 34266
+#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 34267
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 34268
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 34269
+#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 34270
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 34271
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 34272
+#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 34273
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 34274
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 34275
+#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 34276
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 34277
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 34278
+#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 34279
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 34280
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 34281
+#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 34282
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 34283
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 34284
+#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 34285
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 34286
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 34287
+#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 34288
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 34289
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 34290
+#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 34291
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 34292
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 34293
+#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 34294
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 34295
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 34296
+#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 34297
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 34298
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 34299
+#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 34300
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 34301
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 34302
+#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 34303
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 34304
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 34305
+#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 34306
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 34307
+#define XCM_REG_CON_PHY_Q3_RT_OFFSET 34308
+
+#define RUNTIME_ARRAY_SIZE 34309
/* The eth storm context for the Tstorm */
struct tstorm_eth_conn_st_ctx {
@@ -4307,7 +4648,7 @@ struct xstorm_eth_conn_ag_ctx {
#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_SHIFT 6
u8 edpm_event_id;
__le16 physical_q0;
- __le16 quota;
+ __le16 ereserved1;
__le16 edpm_num_bds;
__le16 tx_bd_cons;
__le16 tx_bd_prod;
@@ -4340,7 +4681,7 @@ struct xstorm_eth_conn_ag_ctx {
u8 byte13;
u8 byte14;
u8 byte15;
- u8 byte16;
+ u8 ereserved;
__le16 word11;
__le32 reg10;
__le32 reg11;
@@ -4627,6 +4968,7 @@ enum eth_error_code {
ETH_FILTERS_PAIR_ADD_FAIL_ZERO_MAC,
ETH_FILTERS_VNI_ADD_FAIL_FULL,
ETH_FILTERS_VNI_ADD_FAIL_DUP,
+ ETH_FILTERS_GFT_UPDATE_FAIL,
MAX_ETH_ERROR_CODE
};
@@ -4879,6 +5221,39 @@ enum gft_logic_filter_type {
MAX_GFT_LOGIC_FILTER_TYPE
};
+struct rx_add_openflow_filter_data {
+ __le16 action_icid;
+ u8 priority;
+ u8 reserved0;
+ __le32 tenant_id;
+ __le16 dst_mac_hi;
+ __le16 dst_mac_mid;
+ __le16 dst_mac_lo;
+ __le16 src_mac_hi;
+ __le16 src_mac_mid;
+ __le16 src_mac_lo;
+ __le16 vlan_id;
+ __le16 l2_eth_type;
+ u8 ipv4_dscp;
+ u8 ipv4_frag_type;
+ u8 ipv4_over_ip;
+ u8 tenant_id_exists;
+ __le32 ipv4_dst_addr;
+ __le32 ipv4_src_addr;
+ __le16 l4_dst_port;
+ __le16 l4_src_port;
+};
+
+struct rx_create_gft_action_data {
+ u8 vport_id;
+ u8 reserved[7];
+};
+
+struct rx_create_openflow_action_data {
+ u8 vport_id;
+ u8 reserved[7];
+};
+
/* Ramrod data for rx queue start ramrod */
struct rx_queue_start_ramrod_data {
__le16 rx_queue_id;
@@ -4956,7 +5331,7 @@ struct rx_update_gft_filter_data {
u8 vport_id;
u8 filter_type;
u8 filter_action;
- u8 reserved;
+ u8 assert_on_error;
};
/* Ramrod data for rx queue start ramrod */
@@ -5102,203 +5477,6 @@ struct vport_update_ramrod_data {
struct eth_vport_rss_config rss_config;
};
-struct gft_cam_line {
- __le32 camline;
-#define GFT_CAM_LINE_VALID_MASK 0x1
-#define GFT_CAM_LINE_VALID_SHIFT 0
-#define GFT_CAM_LINE_DATA_MASK 0x3FFF
-#define GFT_CAM_LINE_DATA_SHIFT 1
-#define GFT_CAM_LINE_MASK_BITS_MASK 0x3FFF
-#define GFT_CAM_LINE_MASK_BITS_SHIFT 15
-#define GFT_CAM_LINE_RESERVED1_MASK 0x7
-#define GFT_CAM_LINE_RESERVED1_SHIFT 29
-};
-
-struct gft_cam_line_mapped {
- __le32 camline;
-#define GFT_CAM_LINE_MAPPED_VALID_MASK 0x1
-#define GFT_CAM_LINE_MAPPED_VALID_SHIFT 0
-#define GFT_CAM_LINE_MAPPED_IP_VERSION_MASK 0x1
-#define GFT_CAM_LINE_MAPPED_IP_VERSION_SHIFT 1
-#define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_MASK 0x1
-#define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_SHIFT 2
-#define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK 0xF
-#define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_SHIFT 3
-#define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_MASK 0xF
-#define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_SHIFT 7
-#define GFT_CAM_LINE_MAPPED_PF_ID_MASK 0xF
-#define GFT_CAM_LINE_MAPPED_PF_ID_SHIFT 11
-#define GFT_CAM_LINE_MAPPED_IP_VERSION_MASK_MASK 0x1
-#define GFT_CAM_LINE_MAPPED_IP_VERSION_MASK_SHIFT 15
-#define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_MASK_MASK 0x1
-#define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_MASK_SHIFT 16
-#define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_MASK 0xF
-#define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_SHIFT 17
-#define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_MASK_MASK 0xF
-#define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_MASK_SHIFT 21
-#define GFT_CAM_LINE_MAPPED_PF_ID_MASK_MASK 0xF
-#define GFT_CAM_LINE_MAPPED_PF_ID_MASK_SHIFT 25
-#define GFT_CAM_LINE_MAPPED_RESERVED1_MASK 0x7
-#define GFT_CAM_LINE_MAPPED_RESERVED1_SHIFT 29
-};
-
-union gft_cam_line_union {
- struct gft_cam_line cam_line;
- struct gft_cam_line_mapped cam_line_mapped;
-};
-
-enum gft_profile_ip_version {
- GFT_PROFILE_IPV4 = 0,
- GFT_PROFILE_IPV6 = 1,
- MAX_GFT_PROFILE_IP_VERSION
-};
-
-enum gft_profile_upper_protocol_type {
- GFT_PROFILE_ROCE_PROTOCOL = 0,
- GFT_PROFILE_RROCE_PROTOCOL = 1,
- GFT_PROFILE_FCOE_PROTOCOL = 2,
- GFT_PROFILE_ICMP_PROTOCOL = 3,
- GFT_PROFILE_ARP_PROTOCOL = 4,
- GFT_PROFILE_USER_TCP_SRC_PORT_1_INNER = 5,
- GFT_PROFILE_USER_TCP_DST_PORT_1_INNER = 6,
- GFT_PROFILE_TCP_PROTOCOL = 7,
- GFT_PROFILE_USER_UDP_DST_PORT_1_INNER = 8,
- GFT_PROFILE_USER_UDP_DST_PORT_2_OUTER = 9,
- GFT_PROFILE_UDP_PROTOCOL = 10,
- GFT_PROFILE_USER_IP_1_INNER = 11,
- GFT_PROFILE_USER_IP_2_OUTER = 12,
- GFT_PROFILE_USER_ETH_1_INNER = 13,
- GFT_PROFILE_USER_ETH_2_OUTER = 14,
- GFT_PROFILE_RAW = 15,
- MAX_GFT_PROFILE_UPPER_PROTOCOL_TYPE
-};
-
-struct gft_ram_line {
- __le32 low32bits;
-#define GFT_RAM_LINE_VLAN_SELECT_MASK 0x3
-#define GFT_RAM_LINE_VLAN_SELECT_SHIFT 0
-#define GFT_RAM_LINE_TUNNEL_ENTROPHY_MASK 0x1
-#define GFT_RAM_LINE_TUNNEL_ENTROPHY_SHIFT 2
-#define GFT_RAM_LINE_TUNNEL_TTL_EQUAL_ONE_MASK 0x1
-#define GFT_RAM_LINE_TUNNEL_TTL_EQUAL_ONE_SHIFT 3
-#define GFT_RAM_LINE_TUNNEL_TTL_MASK 0x1
-#define GFT_RAM_LINE_TUNNEL_TTL_SHIFT 4
-#define GFT_RAM_LINE_TUNNEL_ETHERTYPE_MASK 0x1
-#define GFT_RAM_LINE_TUNNEL_ETHERTYPE_SHIFT 5
-#define GFT_RAM_LINE_TUNNEL_DST_PORT_MASK 0x1
-#define GFT_RAM_LINE_TUNNEL_DST_PORT_SHIFT 6
-#define GFT_RAM_LINE_TUNNEL_SRC_PORT_MASK 0x1
-#define GFT_RAM_LINE_TUNNEL_SRC_PORT_SHIFT 7
-#define GFT_RAM_LINE_TUNNEL_DSCP_MASK 0x1
-#define GFT_RAM_LINE_TUNNEL_DSCP_SHIFT 8
-#define GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL_MASK 0x1
-#define GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL_SHIFT 9
-#define GFT_RAM_LINE_TUNNEL_DST_IP_MASK 0x1
-#define GFT_RAM_LINE_TUNNEL_DST_IP_SHIFT 10
-#define GFT_RAM_LINE_TUNNEL_SRC_IP_MASK 0x1
-#define GFT_RAM_LINE_TUNNEL_SRC_IP_SHIFT 11
-#define GFT_RAM_LINE_TUNNEL_PRIORITY_MASK 0x1
-#define GFT_RAM_LINE_TUNNEL_PRIORITY_SHIFT 12
-#define GFT_RAM_LINE_TUNNEL_PROVIDER_VLAN_MASK 0x1
-#define GFT_RAM_LINE_TUNNEL_PROVIDER_VLAN_SHIFT 13
-#define GFT_RAM_LINE_TUNNEL_VLAN_MASK 0x1
-#define GFT_RAM_LINE_TUNNEL_VLAN_SHIFT 14
-#define GFT_RAM_LINE_TUNNEL_DST_MAC_MASK 0x1
-#define GFT_RAM_LINE_TUNNEL_DST_MAC_SHIFT 15
-#define GFT_RAM_LINE_TUNNEL_SRC_MAC_MASK 0x1
-#define GFT_RAM_LINE_TUNNEL_SRC_MAC_SHIFT 16
-#define GFT_RAM_LINE_TTL_EQUAL_ONE_MASK 0x1
-#define GFT_RAM_LINE_TTL_EQUAL_ONE_SHIFT 17
-#define GFT_RAM_LINE_TTL_MASK 0x1
-#define GFT_RAM_LINE_TTL_SHIFT 18
-#define GFT_RAM_LINE_ETHERTYPE_MASK 0x1
-#define GFT_RAM_LINE_ETHERTYPE_SHIFT 19
-#define GFT_RAM_LINE_RESERVED0_MASK 0x1
-#define GFT_RAM_LINE_RESERVED0_SHIFT 20
-#define GFT_RAM_LINE_TCP_FLAG_FIN_MASK 0x1
-#define GFT_RAM_LINE_TCP_FLAG_FIN_SHIFT 21
-#define GFT_RAM_LINE_TCP_FLAG_SYN_MASK 0x1
-#define GFT_RAM_LINE_TCP_FLAG_SYN_SHIFT 22
-#define GFT_RAM_LINE_TCP_FLAG_RST_MASK 0x1
-#define GFT_RAM_LINE_TCP_FLAG_RST_SHIFT 23
-#define GFT_RAM_LINE_TCP_FLAG_PSH_MASK 0x1
-#define GFT_RAM_LINE_TCP_FLAG_PSH_SHIFT 24
-#define GFT_RAM_LINE_TCP_FLAG_ACK_MASK 0x1
-#define GFT_RAM_LINE_TCP_FLAG_ACK_SHIFT 25
-#define GFT_RAM_LINE_TCP_FLAG_URG_MASK 0x1
-#define GFT_RAM_LINE_TCP_FLAG_URG_SHIFT 26
-#define GFT_RAM_LINE_TCP_FLAG_ECE_MASK 0x1
-#define GFT_RAM_LINE_TCP_FLAG_ECE_SHIFT 27
-#define GFT_RAM_LINE_TCP_FLAG_CWR_MASK 0x1
-#define GFT_RAM_LINE_TCP_FLAG_CWR_SHIFT 28
-#define GFT_RAM_LINE_TCP_FLAG_NS_MASK 0x1
-#define GFT_RAM_LINE_TCP_FLAG_NS_SHIFT 29
-#define GFT_RAM_LINE_DST_PORT_MASK 0x1
-#define GFT_RAM_LINE_DST_PORT_SHIFT 30
-#define GFT_RAM_LINE_SRC_PORT_MASK 0x1
-#define GFT_RAM_LINE_SRC_PORT_SHIFT 31
- __le32 high32bits;
-#define GFT_RAM_LINE_DSCP_MASK 0x1
-#define GFT_RAM_LINE_DSCP_SHIFT 0
-#define GFT_RAM_LINE_OVER_IP_PROTOCOL_MASK 0x1
-#define GFT_RAM_LINE_OVER_IP_PROTOCOL_SHIFT 1
-#define GFT_RAM_LINE_DST_IP_MASK 0x1
-#define GFT_RAM_LINE_DST_IP_SHIFT 2
-#define GFT_RAM_LINE_SRC_IP_MASK 0x1
-#define GFT_RAM_LINE_SRC_IP_SHIFT 3
-#define GFT_RAM_LINE_PRIORITY_MASK 0x1
-#define GFT_RAM_LINE_PRIORITY_SHIFT 4
-#define GFT_RAM_LINE_PROVIDER_VLAN_MASK 0x1
-#define GFT_RAM_LINE_PROVIDER_VLAN_SHIFT 5
-#define GFT_RAM_LINE_VLAN_MASK 0x1
-#define GFT_RAM_LINE_VLAN_SHIFT 6
-#define GFT_RAM_LINE_DST_MAC_MASK 0x1
-#define GFT_RAM_LINE_DST_MAC_SHIFT 7
-#define GFT_RAM_LINE_SRC_MAC_MASK 0x1
-#define GFT_RAM_LINE_SRC_MAC_SHIFT 8
-#define GFT_RAM_LINE_TENANT_ID_MASK 0x1
-#define GFT_RAM_LINE_TENANT_ID_SHIFT 9
-#define GFT_RAM_LINE_RESERVED1_MASK 0x3FFFFF
-#define GFT_RAM_LINE_RESERVED1_SHIFT 10
-};
-
-struct mstorm_eth_conn_ag_ctx {
- u8 byte0;
- u8 byte1;
- u8 flags0;
-#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define MSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
-#define MSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
-#define MSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3
-#define MSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 2
-#define MSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3
-#define MSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 4
-#define MSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
-#define MSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
- u8 flags1;
-#define MSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1
-#define MSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 0
-#define MSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1
-#define MSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 1
-#define MSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
-#define MSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
-#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7
- __le16 word0;
- __le16 word1;
- __le32 reg0;
- __le32 reg1;
-};
-
struct xstorm_eth_conn_agctxdq_ext_ldpart {
u8 reserved0;
u8 eth_state;
@@ -5511,7 +5689,7 @@ struct xstorm_eth_conn_agctxdq_ext_ldpart {
#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_SHIFT 6
u8 edpm_event_id;
__le16 physical_q0;
- __le16 quota;
+ __le16 ereserved1;
__le16 edpm_num_bds;
__le16 tx_bd_cons;
__le16 tx_bd_prod;
@@ -5528,6 +5706,43 @@ struct xstorm_eth_conn_agctxdq_ext_ldpart {
__le32 reg4;
};
+struct mstorm_eth_conn_ag_ctx {
+ u8 byte0;
+ u8 byte1;
+ u8 flags0;
+#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define MSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
+#define MSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3
+#define MSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 2
+#define MSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3
+#define MSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 4
+#define MSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
+#define MSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define MSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 0
+#define MSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 1
+#define MSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
+#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7
+ __le16 word0;
+ __le16 word1;
+ __le32 reg0;
+ __le32 reg1;
+};
+
struct xstorm_eth_hw_conn_ag_ctx {
u8 reserved0;
u8 eth_state;
@@ -5740,7 +5955,7 @@ struct xstorm_eth_hw_conn_ag_ctx {
#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_SHIFT 6
u8 edpm_event_id;
__le16 physical_q0;
- __le16 quota;
+ __le16 ereserved1;
__le16 edpm_num_bds;
__le16 tx_bd_cons;
__le16 tx_bd_prod;
@@ -5748,6 +5963,200 @@ struct xstorm_eth_hw_conn_ag_ctx {
__le16 conn_dpi;
};
+struct gft_cam_line {
+ __le32 camline;
+#define GFT_CAM_LINE_VALID_MASK 0x1
+#define GFT_CAM_LINE_VALID_SHIFT 0
+#define GFT_CAM_LINE_DATA_MASK 0x3FFF
+#define GFT_CAM_LINE_DATA_SHIFT 1
+#define GFT_CAM_LINE_MASK_BITS_MASK 0x3FFF
+#define GFT_CAM_LINE_MASK_BITS_SHIFT 15
+#define GFT_CAM_LINE_RESERVED1_MASK 0x7
+#define GFT_CAM_LINE_RESERVED1_SHIFT 29
+};
+
+struct gft_cam_line_mapped {
+ __le32 camline;
+#define GFT_CAM_LINE_MAPPED_VALID_MASK 0x1
+#define GFT_CAM_LINE_MAPPED_VALID_SHIFT 0
+#define GFT_CAM_LINE_MAPPED_IP_VERSION_MASK 0x1
+#define GFT_CAM_LINE_MAPPED_IP_VERSION_SHIFT 1
+#define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_MASK 0x1
+#define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_SHIFT 2
+#define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK 0xF
+#define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_SHIFT 3
+#define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_MASK 0xF
+#define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_SHIFT 7
+#define GFT_CAM_LINE_MAPPED_PF_ID_MASK 0xF
+#define GFT_CAM_LINE_MAPPED_PF_ID_SHIFT 11
+#define GFT_CAM_LINE_MAPPED_IP_VERSION_MASK_MASK 0x1
+#define GFT_CAM_LINE_MAPPED_IP_VERSION_MASK_SHIFT 15
+#define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_MASK_MASK 0x1
+#define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_MASK_SHIFT 16
+#define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_MASK 0xF
+#define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_SHIFT 17
+#define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_MASK_MASK 0xF
+#define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_MASK_SHIFT 21
+#define GFT_CAM_LINE_MAPPED_PF_ID_MASK_MASK 0xF
+#define GFT_CAM_LINE_MAPPED_PF_ID_MASK_SHIFT 25
+#define GFT_CAM_LINE_MAPPED_RESERVED1_MASK 0x7
+#define GFT_CAM_LINE_MAPPED_RESERVED1_SHIFT 29
+};
+
+union gft_cam_line_union {
+ struct gft_cam_line cam_line;
+ struct gft_cam_line_mapped cam_line_mapped;
+};
+
+enum gft_profile_ip_version {
+ GFT_PROFILE_IPV4 = 0,
+ GFT_PROFILE_IPV6 = 1,
+ MAX_GFT_PROFILE_IP_VERSION
+};
+
+struct gft_profile_key {
+ __le16 profile_key;
+#define GFT_PROFILE_KEY_IP_VERSION_MASK 0x1
+#define GFT_PROFILE_KEY_IP_VERSION_SHIFT 0
+#define GFT_PROFILE_KEY_TUNNEL_IP_VERSION_MASK 0x1
+#define GFT_PROFILE_KEY_TUNNEL_IP_VERSION_SHIFT 1
+#define GFT_PROFILE_KEY_UPPER_PROTOCOL_TYPE_MASK 0xF
+#define GFT_PROFILE_KEY_UPPER_PROTOCOL_TYPE_SHIFT 2
+#define GFT_PROFILE_KEY_TUNNEL_TYPE_MASK 0xF
+#define GFT_PROFILE_KEY_TUNNEL_TYPE_SHIFT 6
+#define GFT_PROFILE_KEY_PF_ID_MASK 0xF
+#define GFT_PROFILE_KEY_PF_ID_SHIFT 10
+#define GFT_PROFILE_KEY_RESERVED0_MASK 0x3
+#define GFT_PROFILE_KEY_RESERVED0_SHIFT 14
+};
+
+enum gft_profile_tunnel_type {
+ GFT_PROFILE_NO_TUNNEL = 0,
+ GFT_PROFILE_VXLAN_TUNNEL = 1,
+ GFT_PROFILE_GRE_MAC_OR_NVGRE_TUNNEL = 2,
+ GFT_PROFILE_GRE_IP_TUNNEL = 3,
+ GFT_PROFILE_GENEVE_MAC_TUNNEL = 4,
+ GFT_PROFILE_GENEVE_IP_TUNNEL = 5,
+ MAX_GFT_PROFILE_TUNNEL_TYPE
+};
+
+enum gft_profile_upper_protocol_type {
+ GFT_PROFILE_ROCE_PROTOCOL = 0,
+ GFT_PROFILE_RROCE_PROTOCOL = 1,
+ GFT_PROFILE_FCOE_PROTOCOL = 2,
+ GFT_PROFILE_ICMP_PROTOCOL = 3,
+ GFT_PROFILE_ARP_PROTOCOL = 4,
+ GFT_PROFILE_USER_TCP_SRC_PORT_1_INNER = 5,
+ GFT_PROFILE_USER_TCP_DST_PORT_1_INNER = 6,
+ GFT_PROFILE_TCP_PROTOCOL = 7,
+ GFT_PROFILE_USER_UDP_DST_PORT_1_INNER = 8,
+ GFT_PROFILE_USER_UDP_DST_PORT_2_OUTER = 9,
+ GFT_PROFILE_UDP_PROTOCOL = 10,
+ GFT_PROFILE_USER_IP_1_INNER = 11,
+ GFT_PROFILE_USER_IP_2_OUTER = 12,
+ GFT_PROFILE_USER_ETH_1_INNER = 13,
+ GFT_PROFILE_USER_ETH_2_OUTER = 14,
+ GFT_PROFILE_RAW = 15,
+ MAX_GFT_PROFILE_UPPER_PROTOCOL_TYPE
+};
+
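Each GFT CAM line packs its match fields and their corresponding mask bits into one little-endian dword through paired _MASK/_SHIFT macros (the _MASK_MASK names steer the mask half of the line). A minimal sketch of composing a mapped CAM line for an IPv4/TCP profile, assuming the driver's SET_FIELD() helper and a hypothetical destination pointer cam_line_mapped:

	u32 cam = 0;

	/* Match values: valid IPv4/TCP line, no tunnel. */
	SET_FIELD(cam, GFT_CAM_LINE_MAPPED_VALID, 1);
	SET_FIELD(cam, GFT_CAM_LINE_MAPPED_IP_VERSION, GFT_PROFILE_IPV4);
	SET_FIELD(cam, GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE,
		  GFT_PROFILE_TCP_PROTOCOL);
	SET_FIELD(cam, GFT_CAM_LINE_MAPPED_TUNNEL_TYPE, GFT_PROFILE_NO_TUNNEL);
	/* Mask bits: require an exact match on the fields set above. */
	SET_FIELD(cam, GFT_CAM_LINE_MAPPED_IP_VERSION_MASK, 1);
	SET_FIELD(cam, GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK, 0xF);
	SET_FIELD(cam, GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_MASK, 0xF);

	cam_line_mapped->camline = cpu_to_le32(cam);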
+struct gft_ram_line {
+ __le32 lo;
+#define GFT_RAM_LINE_VLAN_SELECT_MASK 0x3
+#define GFT_RAM_LINE_VLAN_SELECT_SHIFT 0
+#define GFT_RAM_LINE_TUNNEL_ENTROPHY_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_ENTROPHY_SHIFT 2
+#define GFT_RAM_LINE_TUNNEL_TTL_EQUAL_ONE_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_TTL_EQUAL_ONE_SHIFT 3
+#define GFT_RAM_LINE_TUNNEL_TTL_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_TTL_SHIFT 4
+#define GFT_RAM_LINE_TUNNEL_ETHERTYPE_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_ETHERTYPE_SHIFT 5
+#define GFT_RAM_LINE_TUNNEL_DST_PORT_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_DST_PORT_SHIFT 6
+#define GFT_RAM_LINE_TUNNEL_SRC_PORT_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_SRC_PORT_SHIFT 7
+#define GFT_RAM_LINE_TUNNEL_DSCP_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_DSCP_SHIFT 8
+#define GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL_SHIFT 9
+#define GFT_RAM_LINE_TUNNEL_DST_IP_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_DST_IP_SHIFT 10
+#define GFT_RAM_LINE_TUNNEL_SRC_IP_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_SRC_IP_SHIFT 11
+#define GFT_RAM_LINE_TUNNEL_PRIORITY_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_PRIORITY_SHIFT 12
+#define GFT_RAM_LINE_TUNNEL_PROVIDER_VLAN_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_PROVIDER_VLAN_SHIFT 13
+#define GFT_RAM_LINE_TUNNEL_VLAN_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_VLAN_SHIFT 14
+#define GFT_RAM_LINE_TUNNEL_DST_MAC_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_DST_MAC_SHIFT 15
+#define GFT_RAM_LINE_TUNNEL_SRC_MAC_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_SRC_MAC_SHIFT 16
+#define GFT_RAM_LINE_TTL_EQUAL_ONE_MASK 0x1
+#define GFT_RAM_LINE_TTL_EQUAL_ONE_SHIFT 17
+#define GFT_RAM_LINE_TTL_MASK 0x1
+#define GFT_RAM_LINE_TTL_SHIFT 18
+#define GFT_RAM_LINE_ETHERTYPE_MASK 0x1
+#define GFT_RAM_LINE_ETHERTYPE_SHIFT 19
+#define GFT_RAM_LINE_RESERVED0_MASK 0x1
+#define GFT_RAM_LINE_RESERVED0_SHIFT 20
+#define GFT_RAM_LINE_TCP_FLAG_FIN_MASK 0x1
+#define GFT_RAM_LINE_TCP_FLAG_FIN_SHIFT 21
+#define GFT_RAM_LINE_TCP_FLAG_SYN_MASK 0x1
+#define GFT_RAM_LINE_TCP_FLAG_SYN_SHIFT 22
+#define GFT_RAM_LINE_TCP_FLAG_RST_MASK 0x1
+#define GFT_RAM_LINE_TCP_FLAG_RST_SHIFT 23
+#define GFT_RAM_LINE_TCP_FLAG_PSH_MASK 0x1
+#define GFT_RAM_LINE_TCP_FLAG_PSH_SHIFT 24
+#define GFT_RAM_LINE_TCP_FLAG_ACK_MASK 0x1
+#define GFT_RAM_LINE_TCP_FLAG_ACK_SHIFT 25
+#define GFT_RAM_LINE_TCP_FLAG_URG_MASK 0x1
+#define GFT_RAM_LINE_TCP_FLAG_URG_SHIFT 26
+#define GFT_RAM_LINE_TCP_FLAG_ECE_MASK 0x1
+#define GFT_RAM_LINE_TCP_FLAG_ECE_SHIFT 27
+#define GFT_RAM_LINE_TCP_FLAG_CWR_MASK 0x1
+#define GFT_RAM_LINE_TCP_FLAG_CWR_SHIFT 28
+#define GFT_RAM_LINE_TCP_FLAG_NS_MASK 0x1
+#define GFT_RAM_LINE_TCP_FLAG_NS_SHIFT 29
+#define GFT_RAM_LINE_DST_PORT_MASK 0x1
+#define GFT_RAM_LINE_DST_PORT_SHIFT 30
+#define GFT_RAM_LINE_SRC_PORT_MASK 0x1
+#define GFT_RAM_LINE_SRC_PORT_SHIFT 31
+ __le32 hi;
+#define GFT_RAM_LINE_DSCP_MASK 0x1
+#define GFT_RAM_LINE_DSCP_SHIFT 0
+#define GFT_RAM_LINE_OVER_IP_PROTOCOL_MASK 0x1
+#define GFT_RAM_LINE_OVER_IP_PROTOCOL_SHIFT 1
+#define GFT_RAM_LINE_DST_IP_MASK 0x1
+#define GFT_RAM_LINE_DST_IP_SHIFT 2
+#define GFT_RAM_LINE_SRC_IP_MASK 0x1
+#define GFT_RAM_LINE_SRC_IP_SHIFT 3
+#define GFT_RAM_LINE_PRIORITY_MASK 0x1
+#define GFT_RAM_LINE_PRIORITY_SHIFT 4
+#define GFT_RAM_LINE_PROVIDER_VLAN_MASK 0x1
+#define GFT_RAM_LINE_PROVIDER_VLAN_SHIFT 5
+#define GFT_RAM_LINE_VLAN_MASK 0x1
+#define GFT_RAM_LINE_VLAN_SHIFT 6
+#define GFT_RAM_LINE_DST_MAC_MASK 0x1
+#define GFT_RAM_LINE_DST_MAC_SHIFT 7
+#define GFT_RAM_LINE_SRC_MAC_MASK 0x1
+#define GFT_RAM_LINE_SRC_MAC_SHIFT 8
+#define GFT_RAM_LINE_TENANT_ID_MASK 0x1
+#define GFT_RAM_LINE_TENANT_ID_SHIFT 9
+#define GFT_RAM_LINE_RESERVED1_MASK 0x3FFFFF
+#define GFT_RAM_LINE_RESERVED1_SHIFT 10
+};
+
+enum gft_vlan_select {
+ INNER_PROVIDER_VLAN = 0,
+ INNER_VLAN = 1,
+ OUTER_PROVIDER_VLAN = 2,
+ OUTER_VLAN = 3,
+ MAX_GFT_VLAN_SELECT
+};
+
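The RAM line is the per-profile field-enable vector: each single-bit field in lo/hi selects one packet header field that participates in the flow lookup, and VLAN_SELECT (decoded by gft_vlan_select above) picks which VLAN tag is meant. A minimal sketch of a classic 4-tuple TCP/IPv4 profile, again assuming SET_FIELD() and a hypothetical ram_line pointer:

	u32 lo = 0, hi = 0;

	/* L4 ports live in 'lo', IP addresses in 'hi'. */
	SET_FIELD(lo, GFT_RAM_LINE_VLAN_SELECT, INNER_VLAN);
	SET_FIELD(lo, GFT_RAM_LINE_SRC_PORT, 1);
	SET_FIELD(lo, GFT_RAM_LINE_DST_PORT, 1);
	SET_FIELD(hi, GFT_RAM_LINE_SRC_IP, 1);
	SET_FIELD(hi, GFT_RAM_LINE_DST_IP, 1);
	SET_FIELD(hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);

	ram_line->lo = cpu_to_le32(lo);
	ram_line->hi = cpu_to_le32(hi);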
struct mstorm_rdma_task_st_ctx {
struct regpair temp[4];
};
@@ -5827,12 +6236,9 @@ struct rdma_init_func_hdr {
u8 cnq_start_offset;
u8 num_cnqs;
u8 cq_ring_mode;
- u8 cnp_vlan_priority;
- __le32 cnp_send_timeout;
- u8 cnp_dscp;
u8 vf_id;
u8 vf_valid;
- u8 reserved[5];
+ u8 reserved[3];
};
struct rdma_init_func_ramrod_data {
@@ -5856,54 +6262,55 @@ enum rdma_ramrod_cmd_id {
};
struct rdma_register_tid_ramrod_data {
- __le32 flags;
-#define RDMA_REGISTER_TID_RAMROD_DATA_MAX_ID_MASK 0x3FFFF
-#define RDMA_REGISTER_TID_RAMROD_DATA_MAX_ID_SHIFT 0
-#define RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG_MASK 0x1F
-#define RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG_SHIFT 18
-#define RDMA_REGISTER_TID_RAMROD_DATA_TWO_LEVEL_PBL_MASK 0x1
-#define RDMA_REGISTER_TID_RAMROD_DATA_TWO_LEVEL_PBL_SHIFT 23
-#define RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED_MASK 0x1
-#define RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED_SHIFT 24
-#define RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR_MASK 0x1
-#define RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR_SHIFT 25
-#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ_MASK 0x1
-#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ_SHIFT 26
-#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE_MASK 0x1
-#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE_SHIFT 27
-#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_ATOMIC_MASK 0x1
-#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_ATOMIC_SHIFT 28
-#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE_MASK 0x1
-#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE_SHIFT 29
-#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ_MASK 0x1
-#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ_SHIFT 30
-#define RDMA_REGISTER_TID_RAMROD_DATA_ENABLE_MW_BIND_MASK 0x1
-#define RDMA_REGISTER_TID_RAMROD_DATA_ENABLE_MW_BIND_SHIFT 31
+ __le16 flags;
+#define RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG_MASK 0x1F
+#define RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG_SHIFT 0
+#define RDMA_REGISTER_TID_RAMROD_DATA_TWO_LEVEL_PBL_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_TWO_LEVEL_PBL_SHIFT 5
+#define RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED_SHIFT 6
+#define RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR_SHIFT 7
+#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ_SHIFT 8
+#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE_SHIFT 9
+#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_ATOMIC_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_ATOMIC_SHIFT 10
+#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE_SHIFT 11
+#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ_SHIFT 12
+#define RDMA_REGISTER_TID_RAMROD_DATA_ENABLE_MW_BIND_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_ENABLE_MW_BIND_SHIFT 13
+#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED_MASK 0x3
+#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED_SHIFT 14
u8 flags1;
-#define RDMA_REGISTER_TID_RAMROD_DATA_PBL_PAGE_SIZE_LOG_MASK 0x1F
+#define RDMA_REGISTER_TID_RAMROD_DATA_PBL_PAGE_SIZE_LOG_MASK 0x1F
#define RDMA_REGISTER_TID_RAMROD_DATA_PBL_PAGE_SIZE_LOG_SHIFT 0
-#define RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE_MASK 0x7
-#define RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE_SHIFT 5
+#define RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE_MASK 0x7
+#define RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE_SHIFT 5
u8 flags2;
-#define RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR_MASK 0x1
-#define RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR_SHIFT 0
-#define RDMA_REGISTER_TID_RAMROD_DATA_DIF_ON_HOST_FLG_MASK 0x1
-#define RDMA_REGISTER_TID_RAMROD_DATA_DIF_ON_HOST_FLG_SHIFT 1
-#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED1_MASK 0x3F
-#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED1_SHIFT 2
+#define RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR_SHIFT 0
+#define RDMA_REGISTER_TID_RAMROD_DATA_DIF_ON_HOST_FLG_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_DIF_ON_HOST_FLG_SHIFT 1
+#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED1_MASK 0x3F
+#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED1_SHIFT 2
u8 key;
u8 length_hi;
u8 vf_id;
u8 vf_valid;
__le16 pd;
+ __le16 reserved2;
__le32 length_lo;
__le32 itid;
- __le32 reserved2;
+ __le32 reserved3;
struct regpair va;
struct regpair pbl_base;
struct regpair dif_error_addr;
struct regpair dif_runt_addr;
- __le32 reserved3[2];
+ __le32 reserved4[2];
};
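The register-TID flags word shrinks from 32 to 16 bits in this hunk (the 18-bit MAX_ID field is dropped), so every surviving flag is rebased to a new shift starting at 0. A minimal sketch of packing the narrowed word, assuming SET_FIELD() and a hypothetical ramrod_data pointer:

	u16 flags = 0;

	/* 4 KB pages, remote read/write plus local write enabled. */
	SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG, 12);
	SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ, 1);
	SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE, 1);
	SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE, 1);

	ramrod_data->flags = cpu_to_le16(flags);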
struct rdma_resize_cq_output_params {
@@ -6149,6 +6556,233 @@ enum rdma_tid_type {
MAX_RDMA_TID_TYPE
};
+struct xstorm_roce_conn_ag_ctx_dq_ext_ld_part {
+ u8 reserved0;
+ u8 state;
+ u8 flags0;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM0_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM0_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT1_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT1_SHIFT 1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT2_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT2_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM3_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM3_SHIFT 3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT4_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT4_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT5_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT5_SHIFT 5
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT6_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT6_SHIFT 6
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT7_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT7_SHIFT 7
+ u8 flags1;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT8_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT8_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT9_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT9_SHIFT 1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT10_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT10_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT11_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT11_SHIFT 3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT12_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT12_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_MSTORM_FLUSH_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_MSTORM_FLUSH_SHIFT 5
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT14_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT14_SHIFT 6
+#define XSTORMROCECONNAGCTXDQEXTLDPART_YSTORM_FLUSH_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_YSTORM_FLUSH_SHIFT 7
+ u8 flags2;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF0_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF0_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF1_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF1_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF2_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF2_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF3_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF3_SHIFT 6
+ u8 flags3;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF4_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF4_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF5_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF5_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF6_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF6_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_SHIFT 6
+ u8 flags4;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF8_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF8_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF9_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF9_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF10_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF10_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF11_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF11_SHIFT 6
+ u8 flags5;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF12_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF12_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF13_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF13_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF14_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF14_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF15_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF15_SHIFT 6
+ u8 flags6;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF16_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF16_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF17_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF17_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF18_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF18_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF19_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF19_SHIFT 6
+ u8 flags7;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF20_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF20_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF21_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF21_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF0EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF0EN_SHIFT 6
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF1EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF1EN_SHIFT 7
+ u8 flags8;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF2EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF2EN_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF3EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF3EN_SHIFT 1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF4EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF4EN_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF5EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF5EN_SHIFT 3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF6EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF6EN_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_EN_SHIFT 5
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF8EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF8EN_SHIFT 6
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF9EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF9EN_SHIFT 7
+ u8 flags9;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF10EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF10EN_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF11EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF11EN_SHIFT 1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF12EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF12EN_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF13EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF13EN_SHIFT 3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF14EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF14EN_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF15EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF15EN_SHIFT 5
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF16EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF16EN_SHIFT 6
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF17EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF17EN_SHIFT 7
+ u8 flags10;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF18EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF18EN_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF19EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF19EN_SHIFT 1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF20EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF20EN_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF21EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF21EN_SHIFT 3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_EN_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF23EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF23EN_SHIFT 5
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE0EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE0EN_SHIFT 6
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE1EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE1EN_SHIFT 7
+ u8 flags11;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE2EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE2EN_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE3EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE3EN_SHIFT 1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE4EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE4EN_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE5EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE5EN_SHIFT 3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE6EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE6EN_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE7EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE7EN_SHIFT 5
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED1_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED1_SHIFT 6
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE9EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE9EN_SHIFT 7
+ u8 flags12;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE10EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE10EN_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE11EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE11EN_SHIFT 1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED2_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED2_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED3_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED3_SHIFT 3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE14EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE14EN_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE15EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE15EN_SHIFT 5
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE16EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE16EN_SHIFT 6
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE17EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE17EN_SHIFT 7
+ u8 flags13;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE18EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE18EN_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE19EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE19EN_SHIFT 1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED4_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED4_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED5_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED5_SHIFT 3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED6_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED6_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED7_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED7_SHIFT 5
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED8_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED8_SHIFT 6
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED9_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED9_SHIFT 7
+ u8 flags14;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_MIGRATION_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_MIGRATION_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT17_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT17_SHIFT 1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_DPM_PORT_NUM_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_DPM_PORT_NUM_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RESERVED_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RESERVED_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_SHIFT 5
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF23_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF23_SHIFT 6
+ u8 byte2;
+ __le16 physical_q0;
+ __le16 word1;
+ __le16 word2;
+ __le16 word3;
+ __le16 word4;
+ __le16 word5;
+ __le16 conn_dpi;
+ u8 byte3;
+ u8 byte4;
+ u8 byte5;
+ u8 byte6;
+ __le32 reg0;
+ __le32 reg1;
+ __le32 reg2;
+ __le32 snd_nxt_psn;
+ __le32 reg4;
+};
+
struct mstorm_rdma_conn_ag_ctx {
u8 byte0;
u8 byte1;
@@ -6438,233 +7072,6 @@ struct ustorm_rdma_conn_ag_ctx {
__le16 word3;
};
-struct xstorm_roce_conn_ag_ctx_dq_ext_ld_part {
- u8 reserved0;
- u8 state;
- u8 flags0;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM0_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM0_SHIFT 0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT1_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT1_SHIFT 1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT2_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT2_SHIFT 2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM3_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM3_SHIFT 3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT4_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT4_SHIFT 4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT5_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT5_SHIFT 5
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT6_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT6_SHIFT 6
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT7_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT7_SHIFT 7
- u8 flags1;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT8_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT8_SHIFT 0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT9_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT9_SHIFT 1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT10_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT10_SHIFT 2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT11_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT11_SHIFT 3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT12_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT12_SHIFT 4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT13_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT13_SHIFT 5
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT14_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT14_SHIFT 6
-#define XSTORMROCECONNAGCTXDQEXTLDPART_YSTORM_FLUSH_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_YSTORM_FLUSH_SHIFT 7
- u8 flags2;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF0_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF0_SHIFT 0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF1_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF1_SHIFT 2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF2_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF2_SHIFT 4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF3_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF3_SHIFT 6
- u8 flags3;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF4_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF4_SHIFT 0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF5_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF5_SHIFT 2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF6_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF6_SHIFT 4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_SHIFT 6
- u8 flags4;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF8_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF8_SHIFT 0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF9_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF9_SHIFT 2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF10_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF10_SHIFT 4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF11_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF11_SHIFT 6
- u8 flags5;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF12_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF12_SHIFT 0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF13_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF13_SHIFT 2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF14_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF14_SHIFT 4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF15_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF15_SHIFT 6
- u8 flags6;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF16_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF16_SHIFT 0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF17_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF17_SHIFT 2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF18_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF18_SHIFT 4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF19_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF19_SHIFT 6
- u8 flags7;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF20_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF20_SHIFT 0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF21_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF21_SHIFT 2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_SHIFT 4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF0EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF0EN_SHIFT 6
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF1EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF1EN_SHIFT 7
- u8 flags8;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF2EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF2EN_SHIFT 0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF3EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF3EN_SHIFT 1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF4EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF4EN_SHIFT 2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF5EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF5EN_SHIFT 3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF6EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF6EN_SHIFT 4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_EN_SHIFT 5
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF8EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF8EN_SHIFT 6
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF9EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF9EN_SHIFT 7
- u8 flags9;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF10EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF10EN_SHIFT 0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF11EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF11EN_SHIFT 1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF12EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF12EN_SHIFT 2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF13EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF13EN_SHIFT 3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF14EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF14EN_SHIFT 4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF15EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF15EN_SHIFT 5
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF16EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF16EN_SHIFT 6
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF17EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF17EN_SHIFT 7
- u8 flags10;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF18EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF18EN_SHIFT 0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF19EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF19EN_SHIFT 1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF20EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF20EN_SHIFT 2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF21EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF21EN_SHIFT 3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_EN_SHIFT 4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF23EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF23EN_SHIFT 5
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE0EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE0EN_SHIFT 6
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE1EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE1EN_SHIFT 7
- u8 flags11;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE2EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE2EN_SHIFT 0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE3EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE3EN_SHIFT 1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE4EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE4EN_SHIFT 2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE5EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE5EN_SHIFT 3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE6EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE6EN_SHIFT 4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE7EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE7EN_SHIFT 5
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED1_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED1_SHIFT 6
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE9EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE9EN_SHIFT 7
- u8 flags12;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE10EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE10EN_SHIFT 0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE11EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE11EN_SHIFT 1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED2_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED2_SHIFT 2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED3_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED3_SHIFT 3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE14EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE14EN_SHIFT 4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE15EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE15EN_SHIFT 5
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE16EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE16EN_SHIFT 6
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE17EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE17EN_SHIFT 7
- u8 flags13;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE18EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE18EN_SHIFT 0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE19EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE19EN_SHIFT 1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED4_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED4_SHIFT 2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED5_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED5_SHIFT 3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED6_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED6_SHIFT 4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED7_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED7_SHIFT 5
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED8_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED8_SHIFT 6
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED9_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED9_SHIFT 7
- u8 flags14;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_MIGRATION_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_MIGRATION_SHIFT 0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT17_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT17_SHIFT 1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_DPM_PORT_NUM_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_DPM_PORT_NUM_SHIFT 2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RESERVED_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RESERVED_SHIFT 4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_SHIFT 5
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF23_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF23_SHIFT 6
- u8 byte2;
- __le16 physical_q0;
- __le16 word1;
- __le16 word2;
- __le16 word3;
- __le16 word4;
- __le16 word5;
- __le16 conn_dpi;
- u8 byte3;
- u8 byte4;
- u8 byte5;
- u8 byte6;
- __le32 reg0;
- __le32 reg1;
- __le32 reg2;
- __le32 snd_nxt_psn;
- __le32 reg4;
-};
-
struct xstorm_rdma_conn_ag_ctx {
u8 reserved0;
u8 state;
@@ -6696,8 +7103,8 @@ struct xstorm_rdma_conn_ag_ctx {
#define XSTORM_RDMA_CONN_AG_CTX_BIT11_SHIFT 3
#define XSTORM_RDMA_CONN_AG_CTX_BIT12_MASK 0x1
#define XSTORM_RDMA_CONN_AG_CTX_BIT12_SHIFT 4
-#define XSTORM_RDMA_CONN_AG_CTX_BIT13_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_BIT13_SHIFT 5
+#define XSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_SHIFT 5
#define XSTORM_RDMA_CONN_AG_CTX_BIT14_MASK 0x1
#define XSTORM_RDMA_CONN_AG_CTX_BIT14_SHIFT 6
#define XSTORM_RDMA_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1
@@ -7093,16 +7500,35 @@ struct roce_destroy_qp_resp_ramrod_data {
struct regpair output_params_addr;
};
+struct roce_events_stats {
+ __le16 silent_drops;
+ __le16 rnr_naks_sent;
+ __le32 retransmit_count;
+ __le32 icrc_error_count;
+ __le32 reserved;
+};
+
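All counters in the new events-stats block are little-endian on the wire, so a host-side reader converts on access. A minimal sketch, assuming a DMA'd buffer named stats:

	/* Fold the firmware's LE counters into host-order values. */
	u32 retransmits = le32_to_cpu(stats->retransmit_count);
	u32 icrc_errors = le32_to_cpu(stats->icrc_error_count);
	u16 rnr_naks = le16_to_cpu(stats->rnr_naks_sent);
	u16 drops = le16_to_cpu(stats->silent_drops);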
enum roce_event_opcode {
ROCE_EVENT_CREATE_QP = 11,
ROCE_EVENT_MODIFY_QP,
ROCE_EVENT_QUERY_QP,
ROCE_EVENT_DESTROY_QP,
+ ROCE_EVENT_CREATE_UD_QP,
+ ROCE_EVENT_DESTROY_UD_QP,
MAX_ROCE_EVENT_OPCODE
};
+struct roce_init_func_params {
+ u8 ll2_queue_id;
+ u8 cnp_vlan_priority;
+ u8 cnp_dscp;
+ u8 reserved;
+ __le32 cnp_send_timeout;
+};
+
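These are the CNP (congestion notification packet) settings removed from rdma_init_func_hdr earlier in this diff, now carried in a RoCE-specific struct so the protocol-agnostic RDMA init header stays clean. A minimal sketch of filling it, with purely illustrative values and a hypothetical ramrod pointer:

	struct roce_init_func_params *p = &ramrod->roce;

	p->ll2_queue_id = 0;                    /* LL2 queue used for CNPs */
	p->cnp_vlan_priority = 6;               /* PCP on generated CNPs */
	p->cnp_dscp = 48;                       /* DSCP on generated CNPs */
	p->cnp_send_timeout = cpu_to_le32(100); /* pacing between CNPs */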
struct roce_init_func_ramrod_data {
struct rdma_init_func_ramrod_data rdma;
+ struct roce_init_func_params roce;
};
struct roce_modify_qp_req_ramrod_data {
@@ -7222,6 +7648,8 @@ enum roce_ramrod_cmd_id {
ROCE_RAMROD_MODIFY_QP,
ROCE_RAMROD_QUERY_QP,
ROCE_RAMROD_DESTROY_QP,
+ ROCE_RAMROD_CREATE_UD_QP,
+ ROCE_RAMROD_DESTROY_UD_QP,
MAX_ROCE_RAMROD_CMD_ID
};
@@ -7299,13 +7727,6 @@ struct mstorm_roce_resp_conn_ag_ctx {
__le32 reg1;
};
-enum roce_flavor {
- PLAIN_ROCE /* RoCE v1 */ ,
- RROCE_IPV4 /* RoCE v2 (Routable RoCE) over ipv4 */ ,
- RROCE_IPV6 /* RoCE v2 (Routable RoCE) over ipv6 */ ,
- MAX_ROCE_FLAVOR
-};
-
struct tstorm_roce_req_conn_ag_ctx {
u8 reserved0;
u8 state;
@@ -7416,8 +7837,8 @@ struct tstorm_roce_resp_conn_ag_ctx {
u8 flags0;
#define TSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
#define TSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_NOTIFY_REQUESTER_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_NOTIFY_REQUESTER_SHIFT 1
#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT2_MASK 0x1
#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT2_SHIFT 2
#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT3_MASK 0x1
@@ -8097,7 +8518,7 @@ struct xstorm_roce_resp_conn_ag_ctx {
__le16 irq_prod;
__le16 word3;
__le16 word4;
- __le16 word5;
+ __le16 ereserved1;
__le16 irq_cons;
u8 rxmit_opcode;
u8 byte4;
@@ -8200,6 +8621,812 @@ struct ystorm_roce_resp_conn_ag_ctx {
__le32 reg3;
};
+enum roce_flavor {
+	PLAIN_ROCE,		/* RoCE v1 */
+	RROCE_IPV4,		/* RoCE v2 (Routable RoCE) over IPv4 */
+	RROCE_IPV6,		/* RoCE v2 (Routable RoCE) over IPv6 */
+ MAX_ROCE_FLAVOR
+};
+
+struct ystorm_iwarp_conn_st_ctx {
+ __le32 reserved[4];
+};
+
+struct pstorm_iwarp_conn_st_ctx {
+ __le32 reserved[36];
+};
+
+struct xstorm_iwarp_conn_st_ctx {
+ __le32 reserved[44];
+};
+
+struct xstorm_iwarp_conn_ag_ctx {
+ u8 reserved0;
+ u8 state;
+ u8 flags0;
+#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM1_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM1_SHIFT 1
+#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM2_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM2_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define XSTORM_IWARP_CONN_AG_CTX_BIT4_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT4_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_RESERVED2_SHIFT 5
+#define XSTORM_IWARP_CONN_AG_CTX_BIT6_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT6_SHIFT 6
+#define XSTORM_IWARP_CONN_AG_CTX_BIT7_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT7_SHIFT 7
+ u8 flags1;
+#define XSTORM_IWARP_CONN_AG_CTX_BIT8_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT8_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_BIT9_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT9_SHIFT 1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT10_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT10_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_BIT11_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT11_SHIFT 3
+#define XSTORM_IWARP_CONN_AG_CTX_BIT12_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT12_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_BIT13_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT13_SHIFT 5
+#define XSTORM_IWARP_CONN_AG_CTX_BIT14_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT14_SHIFT 6
+#define XSTORM_IWARP_CONN_AG_CTX_YSTORM_FLUSH_OR_REWIND_SND_MAX_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_YSTORM_FLUSH_OR_REWIND_SND_MAX_SHIFT 7
+ u8 flags2;
+#define XSTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 6
+ u8 flags3;
+#define XSTORM_IWARP_CONN_AG_CTX_CF4_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF4_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_CF5_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF5_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_CF6_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF6_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_CF7_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF7_SHIFT 6
+ u8 flags4;
+#define XSTORM_IWARP_CONN_AG_CTX_CF8_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF8_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_CF9_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF9_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_CF10_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF10_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_CF11_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF11_SHIFT 6
+ u8 flags5;
+#define XSTORM_IWARP_CONN_AG_CTX_CF12_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF12_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_CF13_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF13_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_CF15_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF15_SHIFT 6
+ u8 flags6;
+#define XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_CF17_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF17_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_CF18_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF18_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_SHIFT 6
+ u8 flags7;
+#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 6
+#define XSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 7
+ u8 flags8;
+#define XSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 1
+#define XSTORM_IWARP_CONN_AG_CTX_CF4EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF4EN_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_CF5EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF5EN_SHIFT 3
+#define XSTORM_IWARP_CONN_AG_CTX_CF6EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_CF7EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF7EN_SHIFT 5
+#define XSTORM_IWARP_CONN_AG_CTX_CF8EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF8EN_SHIFT 6
+#define XSTORM_IWARP_CONN_AG_CTX_CF9EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF9EN_SHIFT 7
+ u8 flags9;
+#define XSTORM_IWARP_CONN_AG_CTX_CF10EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF10EN_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_CF11EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF11EN_SHIFT 1
+#define XSTORM_IWARP_CONN_AG_CTX_CF12EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF12EN_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_CF13EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF13EN_SHIFT 3
+#define XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_EN_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_CF15EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF15EN_SHIFT 5
+#define XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_EN_SHIFT 6
+#define XSTORM_IWARP_CONN_AG_CTX_CF17EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF17EN_SHIFT 7
+ u8 flags10;
+#define XSTORM_IWARP_CONN_AG_CTX_CF18EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF18EN_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_EN_SHIFT 1
+#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_EN_SHIFT 3
+#define XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_CF23EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF23EN_SHIFT 5
+#define XSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 6
+#define XSTORM_IWARP_CONN_AG_CTX_MORE_TO_SEND_RULE_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_MORE_TO_SEND_RULE_EN_SHIFT 7
+ u8 flags11;
+#define XSTORM_IWARP_CONN_AG_CTX_TX_BLOCKED_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_TX_BLOCKED_EN_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 1
+#define XSTORM_IWARP_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_RESERVED3_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define XSTORM_IWARP_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define XSTORM_IWARP_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_RULE9EN_SHIFT 7
+ u8 flags12;
+#define XSTORM_IWARP_CONN_AG_CTX_SQ_NOT_EMPTY_RULE_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_SQ_NOT_EMPTY_RULE_EN_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define XSTORM_IWARP_CONN_AG_CTX_SQ_FENCE_RULE_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_SQ_FENCE_RULE_EN_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define XSTORM_IWARP_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define XSTORM_IWARP_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_RULE17EN_SHIFT 7
+ u8 flags13;
+#define XSTORM_IWARP_CONN_AG_CTX_IRQ_NOT_EMPTY_RULE_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_IRQ_NOT_EMPTY_RULE_EN_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_HQ_NOT_FULL_RULE_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_HQ_NOT_FULL_RULE_EN_SHIFT 1
+#define XSTORM_IWARP_CONN_AG_CTX_ORQ_RD_FENCE_RULE_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_ORQ_RD_FENCE_RULE_EN_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_RULE21EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_RULE21EN_SHIFT 3
+#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_ORQ_NOT_FULL_RULE_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_ORQ_NOT_FULL_RULE_EN_SHIFT 5
+#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+ u8 flags14;
+#define XSTORM_IWARP_CONN_AG_CTX_BIT16_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT16_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_BIT17_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT17_SHIFT 1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT18_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT18_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED1_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED1_SHIFT 3
+#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED2_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED2_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED3_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED3_SHIFT 5
+#define XSTORM_IWARP_CONN_AG_CTX_CF23_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF23_SHIFT 6
+ u8 byte2;
+ __le16 physical_q0;
+ __le16 physical_q1;
+ __le16 sq_comp_cons;
+ __le16 sq_tx_cons;
+ __le16 sq_prod;
+ __le16 word5;
+ __le16 conn_dpi;
+ u8 byte3;
+ u8 byte4;
+ u8 byte5;
+ u8 byte6;
+ __le32 reg0;
+ __le32 reg1;
+ __le32 reg2;
+ __le32 more_to_send_seq;
+ __le32 reg4;
+ __le32 rewinded_snd_max;
+ __le32 rd_msn;
+ __le16 irq_prod_via_msdm;
+ __le16 irq_cons;
+ __le16 hq_cons_th_or_mpa_data;
+ __le16 hq_cons;
+ __le32 atom_msn;
+ __le32 orq_cons;
+ __le32 orq_cons_th;
+ u8 byte7;
+ u8 max_ord;
+ u8 wqe_data_pad_bytes;
+ u8 former_hq_prod;
+ u8 irq_prod_via_msem;
+ u8 byte12;
+ u8 max_pkt_pdu_size_lo;
+ u8 max_pkt_pdu_size_hi;
+ u8 byte15;
+ u8 e5_reserved;
+ __le16 e5_reserved4;
+ __le32 reg10;
+ __le32 reg11;
+ __le32 shared_queue_page_addr_lo;
+ __le32 shared_queue_page_addr_hi;
+ __le32 reg14;
+ __le32 reg15;
+ __le32 reg16;
+ __le32 reg17;
+};
+
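The packed flags bytes are read back with the matching GET_FIELD() accessor, which shifts right by <NAME>_SHIFT and applies <NAME>_MASK. A minimal sketch, assuming an ag_ctx pointer to this struct:

	/* Decode the SQ-flush credit field and its enable bit. */
	u8 sq_flush_cf = GET_FIELD(ag_ctx->flags5,
				   XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF);
	u8 sq_flush_en = GET_FIELD(ag_ctx->flags9,
				   XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_EN);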
+struct tstorm_iwarp_conn_ag_ctx {
+ u8 reserved0;
+ u8 state;
+ u8 flags0;
+#define TSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define TSTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1
+#define TSTORM_IWARP_CONN_AG_CTX_BIT2_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_BIT2_SHIFT 2
+#define TSTORM_IWARP_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_MSTORM_FLUSH_SHIFT 3
+#define TSTORM_IWARP_CONN_AG_CTX_BIT4_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_BIT4_SHIFT 4
+#define TSTORM_IWARP_CONN_AG_CTX_CACHED_ORQ_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_CACHED_ORQ_SHIFT 5
+#define TSTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3
+#define TSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 6
+ u8 flags1;
+#define TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_MASK 0x3
+#define TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_SHIFT 0
+#define TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_MASK 0x3
+#define TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_SHIFT 2
+#define TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3
+#define TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 4
+#define TSTORM_IWARP_CONN_AG_CTX_CF4_MASK 0x3
+#define TSTORM_IWARP_CONN_AG_CTX_CF4_SHIFT 6
+ u8 flags2;
+#define TSTORM_IWARP_CONN_AG_CTX_CF5_MASK 0x3
+#define TSTORM_IWARP_CONN_AG_CTX_CF5_SHIFT 0
+#define TSTORM_IWARP_CONN_AG_CTX_CF6_MASK 0x3
+#define TSTORM_IWARP_CONN_AG_CTX_CF6_SHIFT 2
+#define TSTORM_IWARP_CONN_AG_CTX_CF7_MASK 0x3
+#define TSTORM_IWARP_CONN_AG_CTX_CF7_SHIFT 4
+#define TSTORM_IWARP_CONN_AG_CTX_CF8_MASK 0x3
+#define TSTORM_IWARP_CONN_AG_CTX_CF8_SHIFT 6
+ u8 flags3;
+#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
+#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_MASK 0x3
+#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_SHIFT 2
+#define TSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 4
+#define TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_EN_SHIFT 5
+#define TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_EN_SHIFT 6
+#define TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 7
+ u8 flags4;
+#define TSTORM_IWARP_CONN_AG_CTX_CF4EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_CF4EN_SHIFT 0
+#define TSTORM_IWARP_CONN_AG_CTX_CF5EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_CF5EN_SHIFT 1
+#define TSTORM_IWARP_CONN_AG_CTX_CF6EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT 2
+#define TSTORM_IWARP_CONN_AG_CTX_CF7EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_CF7EN_SHIFT 3
+#define TSTORM_IWARP_CONN_AG_CTX_CF8EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_CF8EN_SHIFT 4
+#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 5
+#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_EN_SHIFT 6
+#define TSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 7
+ u8 flags5;
+#define TSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define TSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define TSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define TSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define TSTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define TSTORM_IWARP_CONN_AG_CTX_SND_SQ_CONS_RULE_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_SND_SQ_CONS_RULE_SHIFT 5
+#define TSTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define TSTORM_IWARP_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_RULE8EN_SHIFT 7
+ __le32 reg0;
+ __le32 reg1;
+ __le32 unaligned_nxt_seq;
+ __le32 reg3;
+ __le32 reg4;
+ __le32 reg5;
+ __le32 reg6;
+ __le32 reg7;
+ __le32 reg8;
+ u8 orq_cache_idx;
+ u8 hq_prod;
+ __le16 sq_tx_cons_th;
+ u8 orq_prod;
+ u8 irq_cons;
+ __le16 sq_tx_cons;
+ __le16 conn_dpi;
+ __le16 rq_prod;
+ __le32 snd_seq;
+ __le32 last_hq_sequence;
+};
+
+struct tstorm_iwarp_conn_st_ctx {
+ __le32 reserved[60];
+};
+
+struct mstorm_iwarp_conn_st_ctx {
+ __le32 reserved[32];
+};
+
+struct ustorm_iwarp_conn_st_ctx {
+ __le32 reserved[24];
+};
+
+struct iwarp_conn_context {
+ struct ystorm_iwarp_conn_st_ctx ystorm_st_context;
+ struct regpair ystorm_st_padding[2];
+ struct pstorm_iwarp_conn_st_ctx pstorm_st_context;
+ struct regpair pstorm_st_padding[2];
+ struct xstorm_iwarp_conn_st_ctx xstorm_st_context;
+ struct regpair xstorm_st_padding[2];
+ struct xstorm_iwarp_conn_ag_ctx xstorm_ag_context;
+ struct tstorm_iwarp_conn_ag_ctx tstorm_ag_context;
+ struct timers_context timer_context;
+ struct ustorm_rdma_conn_ag_ctx ustorm_ag_context;
+ struct tstorm_iwarp_conn_st_ctx tstorm_st_context;
+ struct regpair tstorm_st_padding[2];
+ struct mstorm_iwarp_conn_st_ctx mstorm_st_context;
+ struct ustorm_iwarp_conn_st_ctx ustorm_st_context;
+};
+
+struct iwarp_create_qp_ramrod_data {
+ u8 flags;
+#define IWARP_CREATE_QP_RAMROD_DATA_FMR_AND_RESERVED_EN_MASK 0x1
+#define IWARP_CREATE_QP_RAMROD_DATA_FMR_AND_RESERVED_EN_SHIFT 0
+#define IWARP_CREATE_QP_RAMROD_DATA_SIGNALED_COMP_MASK 0x1
+#define IWARP_CREATE_QP_RAMROD_DATA_SIGNALED_COMP_SHIFT 1
+#define IWARP_CREATE_QP_RAMROD_DATA_RDMA_RD_EN_MASK 0x1
+#define IWARP_CREATE_QP_RAMROD_DATA_RDMA_RD_EN_SHIFT 2
+#define IWARP_CREATE_QP_RAMROD_DATA_RDMA_WR_EN_MASK 0x1
+#define IWARP_CREATE_QP_RAMROD_DATA_RDMA_WR_EN_SHIFT 3
+#define IWARP_CREATE_QP_RAMROD_DATA_ATOMIC_EN_MASK 0x1
+#define IWARP_CREATE_QP_RAMROD_DATA_ATOMIC_EN_SHIFT 4
+#define IWARP_CREATE_QP_RAMROD_DATA_SRQ_FLG_MASK 0x1
+#define IWARP_CREATE_QP_RAMROD_DATA_SRQ_FLG_SHIFT 5
+#define IWARP_CREATE_QP_RAMROD_DATA_RESERVED0_MASK 0x3
+#define IWARP_CREATE_QP_RAMROD_DATA_RESERVED0_SHIFT 6
+ u8 reserved1;
+ __le16 pd;
+ __le16 sq_num_pages;
+ __le16 rq_num_pages;
+ __le32 reserved3[2];
+ struct regpair qp_handle_for_cqe;
+ struct rdma_srq_id srq_id;
+ __le32 cq_cid_for_sq;
+ __le32 cq_cid_for_rq;
+ __le16 dpi;
+ __le16 physical_q0;
+ __le16 physical_q1;
+ u8 reserved2[6];
+};
+
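As a usage sketch, a create-QP ramrod buffer is filled with SET_FIELD() for the flag bits and cpu_to_le16() for the multi-byte fields, since the structure is little-endian on the wire. All values below are illustrative only:

static void example_fill_create_qp(struct iwarp_create_qp_ramrod_data *p)
{
	SET_FIELD(p->flags, IWARP_CREATE_QP_RAMROD_DATA_RDMA_RD_EN, 1);
	SET_FIELD(p->flags, IWARP_CREATE_QP_RAMROD_DATA_RDMA_WR_EN, 1);
	p->pd = cpu_to_le16(1);			/* illustrative PD id */
	p->sq_num_pages = cpu_to_le16(4);	/* illustrative ring sizes */
	p->rq_num_pages = cpu_to_le16(4);
}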
+enum iwarp_eqe_async_opcode {
+ IWARP_EVENT_TYPE_ASYNC_CONNECT_COMPLETE,
+ IWARP_EVENT_TYPE_ASYNC_ENHANCED_MPA_REPLY_ARRIVED,
+ IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_COMPLETE,
+ IWARP_EVENT_TYPE_ASYNC_CID_CLEANED,
+ IWARP_EVENT_TYPE_ASYNC_EXCEPTION_DETECTED,
+ IWARP_EVENT_TYPE_ASYNC_QP_IN_ERROR_STATE,
+ IWARP_EVENT_TYPE_ASYNC_CQ_OVERFLOW,
+ MAX_IWARP_EQE_ASYNC_OPCODE
+};
+
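A minimal sketch of how a driver-side handler might dispatch on these async opcodes; the handler body and comments are hypothetical, not part of this patch:

static void example_iwarp_async_event(u8 fw_event_code)
{
	switch (fw_event_code) {
	case IWARP_EVENT_TYPE_ASYNC_CONNECT_COMPLETE:
		/* Active-side connect finished; complete the waiter */
		break;
	case IWARP_EVENT_TYPE_ASYNC_QP_IN_ERROR_STATE:
		/* QP moved to error; flush and notify the ULP */
		break;
	case IWARP_EVENT_TYPE_ASYNC_CQ_OVERFLOW:
		/* Fatal per-CQ condition */
		break;
	default:
		break;
	}
}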
+struct iwarp_eqe_data_mpa_async_completion {
+ __le16 ulp_data_len;
+ u8 reserved[6];
+};
+
+struct iwarp_eqe_data_tcp_async_completion {
+ __le16 ulp_data_len;
+ u8 mpa_handshake_mode;
+ u8 reserved[5];
+};
+
+enum iwarp_eqe_sync_opcode {
+ IWARP_EVENT_TYPE_TCP_OFFLOAD = 11,
+ IWARP_EVENT_TYPE_MPA_OFFLOAD,
+ IWARP_EVENT_TYPE_MPA_OFFLOAD_SEND_RTR,
+ IWARP_EVENT_TYPE_CREATE_QP,
+ IWARP_EVENT_TYPE_QUERY_QP,
+ IWARP_EVENT_TYPE_MODIFY_QP,
+ IWARP_EVENT_TYPE_DESTROY_QP,
+ MAX_IWARP_EQE_SYNC_OPCODE
+};
+
+enum iwarp_fw_return_code {
+ IWARP_CONN_ERROR_TCP_CONNECT_INVALID_PACKET = 5,
+ IWARP_CONN_ERROR_TCP_CONNECTION_RST,
+ IWARP_CONN_ERROR_TCP_CONNECT_TIMEOUT,
+ IWARP_CONN_ERROR_MPA_ERROR_REJECT,
+ IWARP_CONN_ERROR_MPA_NOT_SUPPORTED_VER,
+ IWARP_CONN_ERROR_MPA_RST,
+ IWARP_CONN_ERROR_MPA_FIN,
+ IWARP_CONN_ERROR_MPA_RTR_MISMATCH,
+ IWARP_CONN_ERROR_MPA_INSUF_IRD,
+ IWARP_CONN_ERROR_MPA_INVALID_PACKET,
+ IWARP_CONN_ERROR_MPA_LOCAL_ERROR,
+ IWARP_CONN_ERROR_MPA_TIMEOUT,
+ IWARP_CONN_ERROR_MPA_TERMINATE,
+ IWARP_QP_IN_ERROR_GOOD_CLOSE,
+ IWARP_QP_IN_ERROR_BAD_CLOSE,
+ IWARP_EXCEPTION_DETECTED_LLP_CLOSED,
+ IWARP_EXCEPTION_DETECTED_LLP_RESET,
+ IWARP_EXCEPTION_DETECTED_IRQ_FULL,
+ IWARP_EXCEPTION_DETECTED_RQ_EMPTY,
+ IWARP_EXCEPTION_DETECTED_LLP_TIMEOUT,
+ IWARP_EXCEPTION_DETECTED_REMOTE_PROTECTION_ERROR,
+ IWARP_EXCEPTION_DETECTED_CQ_OVERFLOW,
+ IWARP_EXCEPTION_DETECTED_LOCAL_CATASTROPHIC,
+ IWARP_EXCEPTION_DETECTED_LOCAL_ACCESS_ERROR,
+ IWARP_EXCEPTION_DETECTED_REMOTE_OPERATION_ERROR,
+ IWARP_EXCEPTION_DETECTED_TERMINATE_RECEIVED,
+ MAX_IWARP_FW_RETURN_CODE
+};
+
+struct iwarp_init_func_params {
+ u8 ll2_ooo_q_index;
+ u8 reserved1[7];
+};
+
+struct iwarp_init_func_ramrod_data {
+ struct rdma_init_func_ramrod_data rdma;
+ struct tcp_init_params tcp;
+ struct iwarp_init_func_params iwarp;
+};
+
+enum iwarp_modify_qp_new_state_type {
+ IWARP_MODIFY_QP_STATE_CLOSING = 1,
+ IWARP_MODIFY_QP_STATE_ERROR = 2,
+ MAX_IWARP_MODIFY_QP_NEW_STATE_TYPE
+};
+
+struct iwarp_modify_qp_ramrod_data {
+ __le16 transition_to_state;
+ __le16 flags;
+#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_RD_EN_MASK 0x1
+#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_RD_EN_SHIFT 0
+#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_WR_EN_MASK 0x1
+#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_WR_EN_SHIFT 1
+#define IWARP_MODIFY_QP_RAMROD_DATA_ATOMIC_EN_MASK 0x1
+#define IWARP_MODIFY_QP_RAMROD_DATA_ATOMIC_EN_SHIFT 2
+#define IWARP_MODIFY_QP_RAMROD_DATA_STATE_TRANS_EN_MASK 0x1
+#define IWARP_MODIFY_QP_RAMROD_DATA_STATE_TRANS_EN_SHIFT 3
+#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_OPS_EN_FLG_MASK 0x1
+#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_OPS_EN_FLG_SHIFT 4
+#define IWARP_MODIFY_QP_RAMROD_DATA_RESERVED_MASK 0x7FF
+#define IWARP_MODIFY_QP_RAMROD_DATA_RESERVED_SHIFT 5
+ __le32 reserved3[3];
+ __le32 reserved4[8];
+};
+
+struct mpa_rq_params {
+ __le32 ird;
+ __le32 ord;
+};
+
+struct mpa_ulp_buffer {
+ struct regpair addr;
+ __le16 len;
+ __le16 reserved[3];
+};
+
+struct mpa_outgoing_params {
+ u8 crc_needed;
+ u8 reject;
+ u8 reserved[6];
+ struct mpa_rq_params out_rq;
+ struct mpa_ulp_buffer outgoing_ulp_buffer;
+};
+
+struct iwarp_mpa_offload_ramrod_data {
+ struct mpa_outgoing_params common;
+ __le32 tcp_cid;
+ u8 mode;
+ u8 tcp_connect_side;
+ u8 rtr_pref;
+#define IWARP_MPA_OFFLOAD_RAMROD_DATA_RTR_SUPPORTED_MASK 0x7
+#define IWARP_MPA_OFFLOAD_RAMROD_DATA_RTR_SUPPORTED_SHIFT 0
+#define IWARP_MPA_OFFLOAD_RAMROD_DATA_RESERVED1_MASK 0x1F
+#define IWARP_MPA_OFFLOAD_RAMROD_DATA_RESERVED1_SHIFT 3
+ u8 reserved2;
+ struct mpa_ulp_buffer incoming_ulp_buffer;
+ struct regpair async_eqe_output_buf;
+ struct regpair handle_for_async;
+ struct regpair shared_queue_addr;
+ u8 stats_counter_id;
+ u8 reserved3[15];
+};
+
+struct iwarp_offload_params {
+ struct mpa_ulp_buffer incoming_ulp_buffer;
+ struct regpair async_eqe_output_buf;
+ struct regpair handle_for_async;
+ __le16 physical_q0;
+ __le16 physical_q1;
+ u8 stats_counter_id;
+ u8 mpa_mode;
+ u8 reserved[10];
+};
+
+struct iwarp_query_qp_output_params {
+ __le32 flags;
+#define IWARP_QUERY_QP_OUTPUT_PARAMS_ERROR_FLG_MASK 0x1
+#define IWARP_QUERY_QP_OUTPUT_PARAMS_ERROR_FLG_SHIFT 0
+#define IWARP_QUERY_QP_OUTPUT_PARAMS_RESERVED0_MASK 0x7FFFFFFF
+#define IWARP_QUERY_QP_OUTPUT_PARAMS_RESERVED0_SHIFT 1
+ u8 reserved1[4];
+};
+
+struct iwarp_query_qp_ramrod_data {
+ struct regpair output_params_addr;
+};
+
+enum iwarp_ramrod_cmd_id {
+ IWARP_RAMROD_CMD_ID_TCP_OFFLOAD = 11,
+ IWARP_RAMROD_CMD_ID_MPA_OFFLOAD,
+ IWARP_RAMROD_CMD_ID_MPA_OFFLOAD_SEND_RTR,
+ IWARP_RAMROD_CMD_ID_CREATE_QP,
+ IWARP_RAMROD_CMD_ID_QUERY_QP,
+ IWARP_RAMROD_CMD_ID_MODIFY_QP,
+ IWARP_RAMROD_CMD_ID_DESTROY_QP,
+ MAX_IWARP_RAMROD_CMD_ID
+};
+
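Note that these ramrod command ids mirror the sync EQE opcodes in iwarp_eqe_sync_opcode above one-to-one (both ranges start at 11), so a synchronous completion can be matched back to the ramrod that produced it. An illustrative build-time check, not part of this patch:

static inline void example_iwarp_opcode_check(void)
{
	BUILD_BUG_ON(IWARP_EVENT_TYPE_CREATE_QP !=
		     IWARP_RAMROD_CMD_ID_CREATE_QP);
	BUILD_BUG_ON(IWARP_EVENT_TYPE_DESTROY_QP !=
		     IWARP_RAMROD_CMD_ID_DESTROY_QP);
}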
+struct iwarp_rxmit_stats_drv {
+ struct regpair tx_go_to_slow_start_event_cnt;
+ struct regpair tx_fast_retransmit_event_cnt;
+};
+
+struct iwarp_tcp_offload_ramrod_data {
+ struct iwarp_offload_params iwarp;
+ struct tcp_offload_params_opt2 tcp;
+};
+
+enum mpa_negotiation_mode {
+ MPA_NEGOTIATION_TYPE_BASIC = 1,
+ MPA_NEGOTIATION_TYPE_ENHANCED = 2,
+ MAX_MPA_NEGOTIATION_MODE
+};
+
+enum mpa_rtr_type {
+ MPA_RTR_TYPE_NONE = 0,
+ MPA_RTR_TYPE_ZERO_SEND = 1,
+ MPA_RTR_TYPE_ZERO_WRITE = 2,
+ MPA_RTR_TYPE_ZERO_SEND_AND_WRITE = 3,
+ MPA_RTR_TYPE_ZERO_READ = 4,
+ MPA_RTR_TYPE_ZERO_SEND_AND_READ = 5,
+ MPA_RTR_TYPE_ZERO_WRITE_AND_READ = 6,
+ MPA_RTR_TYPE_ZERO_SEND_AND_WRITE_AND_READ = 7,
+ MAX_MPA_RTR_TYPE
+};
+
+struct unaligned_opaque_data {
+ __le16 first_mpa_offset;
+ u8 tcp_payload_offset;
+ u8 flags;
+#define UNALIGNED_OPAQUE_DATA_PKT_REACHED_WIN_RIGHT_EDGE_MASK 0x1
+#define UNALIGNED_OPAQUE_DATA_PKT_REACHED_WIN_RIGHT_EDGE_SHIFT 0
+#define UNALIGNED_OPAQUE_DATA_CONNECTION_CLOSED_MASK 0x1
+#define UNALIGNED_OPAQUE_DATA_CONNECTION_CLOSED_SHIFT 1
+#define UNALIGNED_OPAQUE_DATA_RESERVED_MASK 0x3F
+#define UNALIGNED_OPAQUE_DATA_RESERVED_SHIFT 2
+ __le32 cid;
+};
+
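Sketch of decoding this per-packet opaque data on the host: the multi-byte fields need little-endian conversion and the flag bits go through GET_FIELD(). Illustrative only:

static bool example_unaligned_conn_closed(const struct unaligned_opaque_data *d,
					  u32 *cid, u16 *mpa_off)
{
	*cid = le32_to_cpu(d->cid);
	*mpa_off = le16_to_cpu(d->first_mpa_offset);
	return GET_FIELD(d->flags,
			 UNALIGNED_OPAQUE_DATA_CONNECTION_CLOSED);
}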
+struct mstorm_iwarp_conn_ag_ctx {
+ u8 reserved;
+ u8 state;
+ u8 flags0;
+#define MSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define MSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define MSTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1
+#define MSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1
+#define MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_MASK 0x3
+#define MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_SHIFT 2
+#define MSTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3
+#define MSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 4
+#define MSTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3
+#define MSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_EN_MASK 0x1
+#define MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_EN_SHIFT 0
+#define MSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1
+#define MSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 1
+#define MSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1
+#define MSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 2
+#define MSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define MSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define MSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define MSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define MSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define MSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define MSTORM_IWARP_CONN_AG_CTX_RCQ_CONS_EN_MASK 0x1
+#define MSTORM_IWARP_CONN_AG_CTX_RCQ_CONS_EN_SHIFT 6
+#define MSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define MSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 7
+ __le16 rcq_cons;
+ __le16 rcq_cons_th;
+ __le32 reg0;
+ __le32 reg1;
+};
+
+struct ustorm_iwarp_conn_ag_ctx {
+ u8 reserved;
+ u8 byte1;
+ u8 flags0;
+#define USTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define USTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1
+#define USTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3
+#define USTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 2
+#define USTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3
+#define USTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 4
+#define USTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3
+#define USTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define USTORM_IWARP_CONN_AG_CTX_CF3_MASK 0x3
+#define USTORM_IWARP_CONN_AG_CTX_CF3_SHIFT 0
+#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_MASK 0x3
+#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_SHIFT 2
+#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_MASK 0x3
+#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_SHIFT 4
+#define USTORM_IWARP_CONN_AG_CTX_CF6_MASK 0x3
+#define USTORM_IWARP_CONN_AG_CTX_CF6_SHIFT 6
+ u8 flags2;
+#define USTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 0
+#define USTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 1
+#define USTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 2
+#define USTORM_IWARP_CONN_AG_CTX_CF3EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_CF3EN_SHIFT 3
+#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_EN_SHIFT 4
+#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_EN_SHIFT 5
+#define USTORM_IWARP_CONN_AG_CTX_CF6EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT 6
+#define USTORM_IWARP_CONN_AG_CTX_CQ_SE_EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_CQ_SE_EN_SHIFT 7
+ u8 flags3;
+#define USTORM_IWARP_CONN_AG_CTX_CQ_EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_CQ_EN_SHIFT 0
+#define USTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define USTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define USTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define USTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define USTORM_IWARP_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define USTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define USTORM_IWARP_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_RULE8EN_SHIFT 7
+ u8 byte2;
+ u8 byte3;
+ __le16 word0;
+ __le16 word1;
+ __le32 cq_cons;
+ __le32 cq_se_prod;
+ __le32 cq_prod;
+ __le32 reg3;
+ __le16 word2;
+ __le16 word3;
+};
+
+struct ystorm_iwarp_conn_ag_ctx {
+ u8 byte0;
+ u8 byte1;
+ u8 flags0;
+#define YSTORM_IWARP_CONN_AG_CTX_BIT0_MASK 0x1
+#define YSTORM_IWARP_CONN_AG_CTX_BIT0_SHIFT 0
+#define YSTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1
+#define YSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1
+#define YSTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3
+#define YSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 2
+#define YSTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3
+#define YSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 4
+#define YSTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3
+#define YSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define YSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1
+#define YSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 0
+#define YSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1
+#define YSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 1
+#define YSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1
+#define YSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 2
+#define YSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define YSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define YSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define YSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define YSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define YSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define YSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define YSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define YSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define YSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 7
+ u8 byte2;
+ u8 byte3;
+ __le16 word0;
+ __le32 reg0;
+ __le32 reg1;
+ __le16 word1;
+ __le16 word2;
+ __le16 word3;
+ __le16 word4;
+ __le32 reg2;
+ __le32 reg3;
+};
+
struct ystorm_fcoe_conn_st_ctx {
u8 func_mode;
u8 cos;
@@ -9222,7 +10449,7 @@ struct xstorm_iscsi_conn_ag_ctx {
u8 byte13;
u8 byte14;
u8 byte15;
- u8 byte16;
+ u8 ereserved;
__le16 word11;
__le32 reg10;
__le32 reg11;
@@ -10285,9 +11512,11 @@ struct public_drv_mb {
#define DRV_MSG_CODE_BW_UPDATE_ACK 0x32000000
#define DRV_MSG_CODE_NIG_DRAIN 0x30000000
+#define DRV_MSG_CODE_S_TAG_UPDATE_ACK 0x3b000000
#define DRV_MSG_CODE_INITIATE_PF_FLR 0x02010000
#define DRV_MSG_CODE_VF_DISABLED_DONE 0xc0000000
#define DRV_MSG_CODE_CFG_VF_MSIX 0xc0010000
+#define DRV_MSG_CODE_CFG_PF_VFS_MSIX 0xc0020000
#define DRV_MSG_CODE_NVM_GET_FILE_ATT 0x00030000
#define DRV_MSG_CODE_NVM_READ_NVRAM 0x00050000
#define DRV_MSG_CODE_MCP_RESET 0x00090000
@@ -10444,6 +11673,7 @@ struct public_drv_mb {
#define FW_MSG_CODE_RESOURCE_ALLOC_OK 0x34000000
#define FW_MSG_CODE_RESOURCE_ALLOC_UNKNOWN 0x35000000
#define FW_MSG_CODE_RESOURCE_ALLOC_DEPRECATED 0x36000000
+#define FW_MSG_CODE_S_TAG_UPDATE_ACK_DONE 0x3b000000
#define FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE 0xb0010000
#define FW_MSG_CODE_NVM_OK 0x00010000
@@ -10451,7 +11681,7 @@ struct public_drv_mb {
#define FW_MSG_CODE_OS_WOL_SUPPORTED 0x00800000
#define FW_MSG_CODE_OS_WOL_NOT_SUPPORTED 0x00810000
-
+#define FW_MSG_CODE_DRV_CFG_PF_VFS_MSIX_DONE 0x00870000
#define FW_MSG_SEQ_NUMBER_MASK 0x0000ffff
u32 fw_mb_param;
@@ -10466,6 +11696,8 @@ struct public_drv_mb {
#define FW_MB_PARAM_GET_PF_RDMA_IWARP 0x2
#define FW_MB_PARAM_GET_PF_RDMA_BOTH 0x3
+#define FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR (1 << 0)
+
u32 drv_pulse_mb;
#define DRV_PULSE_SEQ_MASK 0x00007fff
#define DRV_PULSE_SYSTEM_TIME_MASK 0xffff0000
@@ -10489,7 +11721,7 @@ enum MFW_DRV_MSG_TYPE {
MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED,
MFW_DRV_MSG_RESERVED4,
MFW_DRV_MSG_BW_UPDATE,
- MFW_DRV_MSG_BW_UPDATE5,
+ MFW_DRV_MSG_S_TAG_UPDATE,
MFW_DRV_MSG_GET_LAN_STATS,
MFW_DRV_MSG_GET_FCOE_STATS,
MFW_DRV_MSG_GET_ISCSI_STATS,
@@ -10591,6 +11823,12 @@ struct nvm_cfg1_glob {
u32 led_global_settings;
u32 generic_cont1;
u32 mbi_version;
+#define NVM_CFG1_GLOB_MBI_VERSION_0_MASK 0x000000FF
+#define NVM_CFG1_GLOB_MBI_VERSION_0_OFFSET 0
+#define NVM_CFG1_GLOB_MBI_VERSION_1_MASK 0x0000FF00
+#define NVM_CFG1_GLOB_MBI_VERSION_1_OFFSET 8
+#define NVM_CFG1_GLOB_MBI_VERSION_2_MASK 0x00FF0000
+#define NVM_CFG1_GLOB_MBI_VERSION_2_OFFSET 16
u32 mbi_date;
u32 misc_sig;
u32 device_capabilities;
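The three new MASK/OFFSET pairs carve the mbi_version word into its component bytes. A hypothetical decode, assuming VERSION_2 is the most significant component:

static void example_mbi_version(u32 mbi_version, u8 ver[3])
{
	ver[0] = (mbi_version & NVM_CFG1_GLOB_MBI_VERSION_2_MASK) >>
		 NVM_CFG1_GLOB_MBI_VERSION_2_OFFSET;
	ver[1] = (mbi_version & NVM_CFG1_GLOB_MBI_VERSION_1_MASK) >>
		 NVM_CFG1_GLOB_MBI_VERSION_1_OFFSET;
	ver[2] = (mbi_version & NVM_CFG1_GLOB_MBI_VERSION_0_MASK) >>
		 NVM_CFG1_GLOB_MBI_VERSION_0_OFFSET;
}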
@@ -10758,6 +11996,8 @@ struct static_init {
u32 rsrv_persist[5]; /* Persist reserved for MFW upgrades */
};
+#define NVM_MAGIC_VALUE 0x669955aa
+
enum nvm_image_type {
NVM_TYPE_TIM1 = 0x01,
NVM_TYPE_TIM2 = 0x02,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
index 0a8fde629991..b069ad088269 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
@@ -40,31 +40,17 @@
#include "qed_init_ops.h"
#include "qed_reg_addr.h"
-enum cminterface {
- MCM_SEC,
- MCM_PRI,
- UCM_SEC,
- UCM_PRI,
- TCM_SEC,
- TCM_PRI,
- YCM_SEC,
- YCM_PRI,
- XCM_SEC,
- XCM_PRI,
- NUM_OF_CM_INTERFACES
-};
-
-/* general constants */
+/* General constants */
#define QM_PQ_MEM_4KB(pq_size) (pq_size ? DIV_ROUND_UP((pq_size + 1) * \
QM_PQ_ELEMENT_SIZE, \
0x1000) : 0)
#define QM_PQ_SIZE_256B(pq_size) (pq_size ? DIV_ROUND_UP(pq_size, \
0x100) - 1 : 0)
#define QM_INVALID_PQ_ID 0xffff
-/* feature enable */
+/* Feature enable */
#define QM_BYPASS_EN 1
#define QM_BYTE_CRD_EN 1
-/* other PQ constants */
+/* Other PQ constants */
#define QM_OTHER_PQS_PER_PF 4
/* WFQ constants */
#define QM_WFQ_UPPER_BOUND 62500000
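For concreteness, the two PQ-size macros above work out as follows for a PQ of 2048 CIDs, assuming QM_PQ_ELEMENT_SIZE is 4 bytes (its value is defined elsewhere in the driver):

/*
 * QM_PQ_MEM_4KB(2048)   = DIV_ROUND_UP((2048 + 1) * 4, 0x1000)
 *                       = DIV_ROUND_UP(8196, 4096) = 3 4KB pages
 * QM_PQ_SIZE_256B(2048) = DIV_ROUND_UP(2048, 0x100) - 1 = 8 - 1 = 7
 */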
@@ -106,20 +92,21 @@ enum cminterface {
#define BTB_PURE_LB_FACTOR 10
#define BTB_PURE_LB_RATIO 7
/* QM stop command constants */
-#define QM_STOP_PQ_MASK_WIDTH 32
-#define QM_STOP_CMD_ADDR 0x2
-#define QM_STOP_CMD_STRUCT_SIZE 2
+#define QM_STOP_PQ_MASK_WIDTH 32
+#define QM_STOP_CMD_ADDR 2
+#define QM_STOP_CMD_STRUCT_SIZE 2
#define QM_STOP_CMD_PAUSE_MASK_OFFSET 0
#define QM_STOP_CMD_PAUSE_MASK_SHIFT 0
-#define QM_STOP_CMD_PAUSE_MASK_MASK -1
-#define QM_STOP_CMD_GROUP_ID_OFFSET 1
-#define QM_STOP_CMD_GROUP_ID_SHIFT 16
-#define QM_STOP_CMD_GROUP_ID_MASK 15
-#define QM_STOP_CMD_PQ_TYPE_OFFSET 1
-#define QM_STOP_CMD_PQ_TYPE_SHIFT 24
-#define QM_STOP_CMD_PQ_TYPE_MASK 1
-#define QM_STOP_CMD_MAX_POLL_COUNT 100
-#define QM_STOP_CMD_POLL_PERIOD_US 500
+#define QM_STOP_CMD_PAUSE_MASK_MASK -1
+#define QM_STOP_CMD_GROUP_ID_OFFSET 1
+#define QM_STOP_CMD_GROUP_ID_SHIFT 16
+#define QM_STOP_CMD_GROUP_ID_MASK 15
+#define QM_STOP_CMD_PQ_TYPE_OFFSET 1
+#define QM_STOP_CMD_PQ_TYPE_SHIFT 24
+#define QM_STOP_CMD_PQ_TYPE_MASK 1
+#define QM_STOP_CMD_MAX_POLL_COUNT 100
+#define QM_STOP_CMD_POLL_PERIOD_US 500
+
/* QM command macros */
#define QM_CMD_STRUCT_SIZE(cmd) cmd ## \
_STRUCT_SIZE
@@ -146,16 +133,17 @@ static void qed_enable_pf_rl(struct qed_hwfn *p_hwfn, bool pf_rl_en)
{
STORE_RT_REG(p_hwfn, QM_REG_RLPFENABLE_RT_OFFSET, pf_rl_en ? 1 : 0);
if (pf_rl_en) {
- /* enable RLs for all VOQs */
+ /* Enable RLs for all VOQs */
STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_RT_OFFSET,
(1 << MAX_NUM_VOQS) - 1);
- /* write RL period */
+ /* Write RL period */
STORE_RT_REG(p_hwfn,
QM_REG_RLPFPERIOD_RT_OFFSET, QM_RL_PERIOD_CLK_25M);
STORE_RT_REG(p_hwfn,
QM_REG_RLPFPERIODTIMER_RT_OFFSET,
QM_RL_PERIOD_CLK_25M);
- /* set credit threshold for QM bypass flow */
+
+ /* Set credit threshold for QM bypass flow */
if (QM_BYPASS_EN)
STORE_RT_REG(p_hwfn,
QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET,
@@ -167,7 +155,8 @@ static void qed_enable_pf_rl(struct qed_hwfn *p_hwfn, bool pf_rl_en)
static void qed_enable_pf_wfq(struct qed_hwfn *p_hwfn, bool pf_wfq_en)
{
STORE_RT_REG(p_hwfn, QM_REG_WFQPFENABLE_RT_OFFSET, pf_wfq_en ? 1 : 0);
- /* set credit threshold for QM bypass flow */
+
+ /* Set credit threshold for QM bypass flow */
if (pf_wfq_en && QM_BYPASS_EN)
STORE_RT_REG(p_hwfn,
QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET,
@@ -180,14 +169,15 @@ static void qed_enable_vport_rl(struct qed_hwfn *p_hwfn, bool vport_rl_en)
STORE_RT_REG(p_hwfn, QM_REG_RLGLBLENABLE_RT_OFFSET,
vport_rl_en ? 1 : 0);
if (vport_rl_en) {
- /* write RL period (use timer 0 only) */
+ /* Write RL period (use timer 0 only) */
STORE_RT_REG(p_hwfn,
QM_REG_RLGLBLPERIOD_0_RT_OFFSET,
QM_RL_PERIOD_CLK_25M);
STORE_RT_REG(p_hwfn,
QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET,
QM_RL_PERIOD_CLK_25M);
- /* set credit threshold for QM bypass flow */
+
+ /* Set credit threshold for QM bypass flow */
if (QM_BYPASS_EN)
STORE_RT_REG(p_hwfn,
QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET,
@@ -200,7 +190,8 @@ static void qed_enable_vport_wfq(struct qed_hwfn *p_hwfn, bool vport_wfq_en)
{
STORE_RT_REG(p_hwfn, QM_REG_WFQVPENABLE_RT_OFFSET,
vport_wfq_en ? 1 : 0);
- /* set credit threshold for QM bypass flow */
+
+ /* Set credit threshold for QM bypass flow */
if (vport_wfq_en && QM_BYPASS_EN)
STORE_RT_REG(p_hwfn,
QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET,
@@ -208,7 +199,7 @@ static void qed_enable_vport_wfq(struct qed_hwfn *p_hwfn, bool vport_wfq_en)
}
/* Prepare runtime init values to allocate PBF command queue lines for
- * the specified VOQ
+ * the specified VOQ.
*/
static void qed_cmdq_lines_voq_rt_init(struct qed_hwfn *p_hwfn,
u8 voq, u16 cmdq_lines)
@@ -232,7 +223,7 @@ static void qed_cmdq_lines_rt_init(
{
u8 tc, voq, port_id, num_tcs_in_port;
- /* clear PBF lines for all VOQs */
+ /* Clear PBF lines for all VOQs */
for (voq = 0; voq < MAX_NUM_VOQS; voq++)
STORE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq), 0);
for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
@@ -285,7 +276,7 @@ static void qed_btb_blocks_rt_init(
if (!port_params[port_id].active)
continue;
- /* subtract headroom blocks */
+ /* Subtract headroom blocks */
usable_blocks = port_params[port_id].num_btb_blocks -
BTB_HEADROOM_BLOCKS;
@@ -305,7 +296,7 @@ static void qed_btb_blocks_rt_init(
phys_blocks = (usable_blocks - pure_lb_blocks) /
num_tcs_in_port;
- /* init physical TCs */
+ /* Init physical TCs */
for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
if (((port_params[port_id].active_phys_tcs >>
tc) & 0x1) != 1)
@@ -317,7 +308,7 @@ static void qed_btb_blocks_rt_init(
phys_blocks);
}
- /* init pure LB TC */
+ /* Init pure LB TC */
temp = LB_VOQ(port_id);
STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(temp),
pure_lb_blocks);
@@ -338,24 +329,24 @@ static void qed_tx_pq_map_rt_init(
QM_PF_QUEUE_GROUP_SIZE;
u16 i, pq_id, pq_group;
- /* a bit per Tx PQ indicating if the PQ is associated with a VF */
+ /* A bit per Tx PQ indicating if the PQ is associated with a VF */
u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 };
u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE;
u32 pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_pf_cids);
u32 vport_pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_vf_cids);
u32 mem_addr_4kb = base_mem_addr_4kb;
- /* set mapping from PQ group to PF */
+ /* Set mapping from PQ group to PF */
for (pq_group = first_pq_group; pq_group <= last_pq_group; pq_group++)
STORE_RT_REG(p_hwfn, QM_REG_PQTX2PF_0_RT_OFFSET + pq_group,
(u32)(p_params->pf_id));
- /* set PQ sizes */
+ /* Set PQ sizes */
STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_0_RT_OFFSET,
QM_PQ_SIZE_256B(p_params->num_pf_cids));
STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_1_RT_OFFSET,
QM_PQ_SIZE_256B(p_params->num_vf_cids));
- /* go over all Tx PQs */
+ /* Go over all Tx PQs */
for (i = 0, pq_id = p_params->start_pq; i < num_pqs; i++, pq_id++) {
u8 voq = VOQ(p_params->port_id, p_params->pq_params[i].tc_id,
p_params->max_phys_tcs_per_port);
@@ -366,17 +357,18 @@ static void qed_tx_pq_map_rt_init(
(p_params->pq_params[i].vport_id <
MAX_QM_GLOBAL_RLS);
- /* update first Tx PQ of VPORT/TC */
+ /* Update first Tx PQ of VPORT/TC */
u8 vport_id_in_pf = p_params->pq_params[i].vport_id -
p_params->start_vport;
u16 *pq_ids = &vport_params[vport_id_in_pf].first_tx_pq_id[0];
u16 first_tx_pq_id = pq_ids[p_params->pq_params[i].tc_id];
if (first_tx_pq_id == QM_INVALID_PQ_ID) {
- /* create new VP PQ */
+ /* Create new VP PQ */
pq_ids[p_params->pq_params[i].tc_id] = pq_id;
first_tx_pq_id = pq_id;
- /* map VP PQ to VOQ and PF */
+
+ /* Map VP PQ to VOQ and PF */
STORE_RT_REG(p_hwfn,
QM_REG_WFQVPMAP_RT_OFFSET +
first_tx_pq_id,
@@ -388,7 +380,7 @@ static void qed_tx_pq_map_rt_init(
if (p_params->pq_params[i].rl_valid && !rl_valid)
DP_NOTICE(p_hwfn,
"Invalid VPORT ID for rate limiter configuration");
- /* fill PQ map entry */
+ /* Fill PQ map entry */
memset(&tx_pq_map, 0, sizeof(tx_pq_map));
SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_PQ_VALID, 1);
SET_FIELD(tx_pq_map.reg,
@@ -400,18 +392,16 @@ static void qed_tx_pq_map_rt_init(
SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VOQ, voq);
SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_WRR_WEIGHT_GROUP,
p_params->pq_params[i].wrr_group);
- /* write PQ map entry to CAM */
+ /* Write PQ map entry to CAM */
STORE_RT_REG(p_hwfn, QM_REG_TXPQMAP_RT_OFFSET + pq_id,
*((u32 *)&tx_pq_map));
- /* set base address */
+ /* Set base address */
STORE_RT_REG(p_hwfn,
QM_REG_BASEADDRTXPQ_RT_OFFSET + pq_id,
mem_addr_4kb);
- /* check if VF PQ */
+
+ /* If VF PQ, add indication to PQ VF mask */
if (is_vf_pq) {
- /* if PQ is associated with a VF, add indication
- * to PQ VF mask
- */
tx_pq_vf_mask[pq_id /
QM_PF_QUEUE_GROUP_SIZE] |=
BIT((pq_id % QM_PF_QUEUE_GROUP_SIZE));
@@ -421,16 +411,12 @@ static void qed_tx_pq_map_rt_init(
}
}
- /* store Tx PQ VF mask to size select register */
- for (i = 0; i < num_tx_pq_vf_masks; i++) {
- if (tx_pq_vf_mask[i]) {
- u32 addr;
-
- addr = QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET + i;
- STORE_RT_REG(p_hwfn, addr,
+ /* Store Tx PQ VF mask to size select register */
+ for (i = 0; i < num_tx_pq_vf_masks; i++)
+ if (tx_pq_vf_mask[i])
+ STORE_RT_REG(p_hwfn,
+ QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET + i,
tx_pq_vf_mask[i]);
- }
- }
}
/* Prepare Other PQ mapping runtime init values for the specified PF */
@@ -440,23 +426,25 @@ static void qed_other_pq_map_rt_init(struct qed_hwfn *p_hwfn,
u32 num_pf_cids,
u32 num_tids, u32 base_mem_addr_4kb)
{
- u16 i, pq_id;
+ u32 pq_size, pq_mem_4kb, mem_addr_4kb;
+ u16 i, pq_id, pq_group;
/* a single other PQ group is used in each PF,
* where PQ group i is used in PF i.
*/
- u16 pq_group = pf_id;
- u32 pq_size = num_pf_cids + num_tids;
- u32 pq_mem_4kb = QM_PQ_MEM_4KB(pq_size);
- u32 mem_addr_4kb = base_mem_addr_4kb;
+ pq_group = pf_id;
+ pq_size = num_pf_cids + num_tids;
+ pq_mem_4kb = QM_PQ_MEM_4KB(pq_size);
+ mem_addr_4kb = base_mem_addr_4kb;
- /* map PQ group to PF */
+ /* Map PQ group to PF */
STORE_RT_REG(p_hwfn, QM_REG_PQOTHER2PF_0_RT_OFFSET + pq_group,
(u32)(pf_id));
- /* set PQ sizes */
+ /* Set PQ sizes */
STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_2_RT_OFFSET,
QM_PQ_SIZE_256B(pq_size));
- /* set base address */
+
+ /* Set base address */
for (i = 0, pq_id = pf_id * QM_PF_QUEUE_GROUP_SIZE;
i < QM_OTHER_PQS_PER_PF; i++, pq_id++) {
STORE_RT_REG(p_hwfn,
@@ -485,7 +473,7 @@ static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn,
inc_val = QM_WFQ_INC_VAL(p_params->pf_wfq);
if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
- DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration");
+ DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration\n");
return -1;
}
@@ -514,7 +502,7 @@ static int qed_pf_rl_rt_init(struct qed_hwfn *p_hwfn, u8 pf_id, u32 pf_rl)
u32 inc_val = QM_RL_INC_VAL(pf_rl);
if (inc_val > QM_RL_MAX_INC_VAL) {
- DP_NOTICE(p_hwfn, "Invalid PF rate limit configuration");
+ DP_NOTICE(p_hwfn, "Invalid PF rate limit configuration\n");
return -1;
}
STORE_RT_REG(p_hwfn, QM_REG_RLPFCRD_RT_OFFSET + pf_id,
@@ -535,7 +523,7 @@ static int qed_vp_wfq_rt_init(struct qed_hwfn *p_hwfn,
u32 inc_val;
u8 tc, i;
- /* go over all PF VPORTs */
+ /* Go over all PF VPORTs */
for (i = 0; i < num_vports; i++) {
if (!vport_params[i].vport_wfq)
@@ -544,7 +532,7 @@ static int qed_vp_wfq_rt_init(struct qed_hwfn *p_hwfn,
inc_val = QM_WFQ_INC_VAL(vport_params[i].vport_wfq);
if (inc_val > QM_WFQ_MAX_INC_VAL) {
DP_NOTICE(p_hwfn,
- "Invalid VPORT WFQ weight configuration");
+ "Invalid VPORT WFQ weight configuration\n");
return -1;
}
@@ -578,17 +566,17 @@ static int qed_vport_rl_rt_init(struct qed_hwfn *p_hwfn,
if (start_vport + num_vports >= MAX_QM_GLOBAL_RLS) {
DP_NOTICE(p_hwfn,
- "Invalid VPORT ID for rate limiter configuration");
+ "Invalid VPORT ID for rate limiter configuration\n");
return -1;
}
- /* go over all PF VPORTs */
+ /* Go over all PF VPORTs */
for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) {
u32 inc_val = QM_RL_INC_VAL(vport_params[i].vport_rl);
if (inc_val > QM_RL_MAX_INC_VAL) {
DP_NOTICE(p_hwfn,
- "Invalid VPORT rate-limit configuration");
+ "Invalid VPORT rate-limit configuration\n");
return -1;
}
@@ -617,7 +605,7 @@ static bool qed_poll_on_qm_cmd_ready(struct qed_hwfn *p_hwfn,
reg_val = qed_rd(p_hwfn, p_ptt, QM_REG_SDMCMDREADY);
}
- /* check if timeout while waiting for SDM command ready */
+ /* Check if timeout while waiting for SDM command ready */
if (i == QM_STOP_CMD_MAX_POLL_COUNT) {
DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
"Timeout when waiting for QM SDM command ready signal\n");
@@ -701,16 +689,16 @@ int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn,
QM_OTHER_PQS_PER_PF;
u8 tc, i;
- /* clear first Tx PQ ID array for each VPORT */
+ /* Clear first Tx PQ ID array for each VPORT */
for (i = 0; i < p_params->num_vports; i++)
for (tc = 0; tc < NUM_OF_TCS; tc++)
vport_params[i].first_tx_pq_id[tc] = QM_INVALID_PQ_ID;
- /* map Other PQs (if any) */
+ /* Map Other PQs (if any) */
qed_other_pq_map_rt_init(p_hwfn, p_params->port_id, p_params->pf_id,
p_params->num_pf_cids, p_params->num_tids, 0);
- /* map Tx PQs */
+ /* Map Tx PQs */
qed_tx_pq_map_rt_init(p_hwfn, p_ptt, p_params, other_mem_size_4kb);
if (p_params->pf_wfq)
@@ -736,7 +724,7 @@ int qed_init_pf_wfq(struct qed_hwfn *p_hwfn,
u32 inc_val = QM_WFQ_INC_VAL(pf_wfq);
if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
- DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration");
+ DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration\n");
return -1;
}
@@ -750,7 +738,7 @@ int qed_init_pf_rl(struct qed_hwfn *p_hwfn,
u32 inc_val = QM_RL_INC_VAL(pf_rl);
if (inc_val > QM_RL_MAX_INC_VAL) {
- DP_NOTICE(p_hwfn, "Invalid PF rate limit configuration");
+ DP_NOTICE(p_hwfn, "Invalid PF rate limit configuration\n");
return -1;
}
@@ -766,17 +754,18 @@ int qed_init_vport_wfq(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u16 first_tx_pq_id[NUM_OF_TCS], u16 vport_wfq)
{
- u32 inc_val = QM_WFQ_INC_VAL(vport_wfq);
+ u16 vport_pq_id;
+ u32 inc_val;
u8 tc;
+ inc_val = QM_WFQ_INC_VAL(vport_wfq);
if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
- DP_NOTICE(p_hwfn, "Invalid VPORT WFQ weight configuration");
+ DP_NOTICE(p_hwfn, "Invalid VPORT WFQ weight configuration\n");
return -1;
}
for (tc = 0; tc < NUM_OF_TCS; tc++) {
- u16 vport_pq_id = first_tx_pq_id[tc];
-
+ vport_pq_id = first_tx_pq_id[tc];
if (vport_pq_id != QM_INVALID_PQ_ID)
qed_wr(p_hwfn, p_ptt,
QM_REG_WFQVPWEIGHT + vport_pq_id * 4,
@@ -793,12 +782,12 @@ int qed_init_vport_rl(struct qed_hwfn *p_hwfn,
if (vport_id >= MAX_QM_GLOBAL_RLS) {
DP_NOTICE(p_hwfn,
- "Invalid VPORT ID for rate limiter configuration");
+ "Invalid VPORT ID for rate limiter configuration\n");
return -1;
}
if (inc_val > QM_RL_MAX_INC_VAL) {
- DP_NOTICE(p_hwfn, "Invalid VPORT rate-limit configuration");
+ DP_NOTICE(p_hwfn, "Invalid VPORT rate-limit configuration\n");
return -1;
}
@@ -818,15 +807,15 @@ bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
u32 cmd_arr[QM_CMD_STRUCT_SIZE(QM_STOP_CMD)] = { 0 };
u32 pq_mask = 0, last_pq = start_pq + num_pqs - 1, pq_id;
- /* set command's PQ type */
+ /* Set command's PQ type */
QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PQ_TYPE, is_tx_pq ? 0 : 1);
for (pq_id = start_pq; pq_id <= last_pq; pq_id++) {
- /* set PQ bit in mask (stop command only) */
+ /* Set PQ bit in mask (stop command only) */
if (!is_release_cmd)
pq_mask |= (1 << (pq_id % QM_STOP_PQ_MASK_WIDTH));
- /* if last PQ or end of PQ mask, write command */
+ /* If last PQ or end of PQ mask, write command */
if ((pq_id == last_pq) ||
(pq_id % QM_STOP_PQ_MASK_WIDTH ==
(QM_STOP_PQ_MASK_WIDTH - 1))) {
@@ -962,8 +951,10 @@ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
ip_geneve_enable ? 1 : 0);
}
+#define T_ETH_PACKET_ACTION_GFT_EVENTID 23
+#define PARSER_ETH_CONN_GFT_ACTION_CM_HDR 272
#define T_ETH_PACKET_MATCH_RFS_EVENTID 25
-#define PARSER_ETH_CONN_CM_HDR (0x0)
+#define PARSER_ETH_CONN_CM_HDR 0
#define CAM_LINE_SIZE sizeof(u32)
#define RAM_LINE_SIZE sizeof(u64)
#define REG_SIZE sizeof(u32)
@@ -971,40 +962,26 @@ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
void qed_set_rfs_mode_disable(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u16 pf_id)
{
- union gft_cam_line_union camline;
- struct gft_ram_line ramline;
- u32 *p_ramline, i;
-
- p_ramline = (u32 *)&ramline;
+ u32 hw_addr = PRS_REG_GFT_PROFILE_MASK_RAM +
+ pf_id * RAM_LINE_SIZE;
 /* Stop using GFT logic */
qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 0);
qed_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, 0x0);
- memset(&camline, 0, sizeof(union gft_cam_line_union));
- qed_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id,
- camline.cam_line_mapped.camline);
- memset(&ramline, 0, sizeof(ramline));
-
- for (i = 0; i < RAM_LINE_SIZE / REG_SIZE; i++) {
- u32 hw_addr = PRS_REG_GFT_PROFILE_MASK_RAM;
-
- hw_addr += (RAM_LINE_SIZE * pf_id + i * REG_SIZE);
-
- qed_wr(p_hwfn, p_ptt, hw_addr, *(p_ramline + i));
- }
+ qed_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id, 0);
+ qed_wr(p_hwfn, p_ptt, hw_addr, 0);
+ qed_wr(p_hwfn, p_ptt, hw_addr + 4, 0);
}
void qed_set_rfs_mode_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
u16 pf_id, bool tcp, bool udp,
bool ipv4, bool ipv6)
{
- u32 rfs_cm_hdr_event_id, *p_ramline;
union gft_cam_line_union camline;
struct gft_ram_line ramline;
- int i;
+ u32 rfs_cm_hdr_event_id;
rfs_cm_hdr_event_id = qed_rd(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT);
- p_ramline = (u32 *)&ramline;
if (!ipv6 && !ipv4)
DP_NOTICE(p_hwfn,
@@ -1024,18 +1001,20 @@ void qed_set_rfs_mode_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
qed_wr(p_hwfn, p_ptt, PRS_REG_LOAD_L2_FILTER, 0);
camline.cam_line_mapped.camline = 0;
- /* cam line is now valid!! */
+ /* Cam line is now valid!! */
SET_FIELD(camline.cam_line_mapped.camline,
GFT_CAM_LINE_MAPPED_VALID, 1);
/* filters are per PF!! */
SET_FIELD(camline.cam_line_mapped.camline,
- GFT_CAM_LINE_MAPPED_PF_ID_MASK, 1);
+ GFT_CAM_LINE_MAPPED_PF_ID_MASK,
+ GFT_CAM_LINE_MAPPED_PF_ID_MASK_MASK);
SET_FIELD(camline.cam_line_mapped.camline,
GFT_CAM_LINE_MAPPED_PF_ID, pf_id);
if (!(tcp && udp)) {
SET_FIELD(camline.cam_line_mapped.camline,
- GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK, 1);
+ GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK,
+ GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_MASK);
if (tcp)
SET_FIELD(camline.cam_line_mapped.camline,
GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE,
@@ -1059,34 +1038,38 @@ void qed_set_rfs_mode_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
GFT_PROFILE_IPV6);
}
- /* write characteristics to cam */
+ /* Write characteristics to cam */
qed_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id,
camline.cam_line_mapped.camline);
camline.cam_line_mapped.camline = qed_rd(p_hwfn, p_ptt,
PRS_REG_GFT_CAM +
CAM_LINE_SIZE * pf_id);
- /* write line to RAM - compare to filter 4 tuple */
- ramline.low32bits = 0;
- ramline.high32bits = 0;
- SET_FIELD(ramline.high32bits, GFT_RAM_LINE_DST_IP, 1);
- SET_FIELD(ramline.high32bits, GFT_RAM_LINE_SRC_IP, 1);
- SET_FIELD(ramline.low32bits, GFT_RAM_LINE_SRC_PORT, 1);
- SET_FIELD(ramline.low32bits, GFT_RAM_LINE_DST_PORT, 1);
-
- /* each iteration write to reg */
- for (i = 0; i < RAM_LINE_SIZE / REG_SIZE; i++)
- qed_wr(p_hwfn, p_ptt,
- PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id +
- i * REG_SIZE, *(p_ramline + i));
-
- /* set default profile so that no filter match will happen */
- ramline.low32bits = 0xffff;
- ramline.high32bits = 0xffff;
-
- for (i = 0; i < RAM_LINE_SIZE / REG_SIZE; i++)
- qed_wr(p_hwfn, p_ptt,
- PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE *
- PRS_GFT_CAM_LINES_NO_MATCH + i * REG_SIZE,
- *(p_ramline + i));
+ /* Write line to RAM - compare to filter 4 tuple */
+ ramline.lo = 0;
+ ramline.hi = 0;
+ SET_FIELD(ramline.hi, GFT_RAM_LINE_DST_IP, 1);
+ SET_FIELD(ramline.hi, GFT_RAM_LINE_SRC_IP, 1);
+ SET_FIELD(ramline.hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
+ SET_FIELD(ramline.lo, GFT_RAM_LINE_ETHERTYPE, 1);
+ SET_FIELD(ramline.lo, GFT_RAM_LINE_SRC_PORT, 1);
+ SET_FIELD(ramline.lo, GFT_RAM_LINE_DST_PORT, 1);
+
+ /* Each iteration write to reg */
+ qed_wr(p_hwfn, p_ptt,
+ PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id,
+ ramline.lo);
+ qed_wr(p_hwfn, p_ptt,
+ PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id + 4,
+ ramline.hi);
+
+ /* Set default profile so that no filter match will happen */
+ qed_wr(p_hwfn, p_ptt,
+ PRS_REG_GFT_PROFILE_MASK_RAM +
+ RAM_LINE_SIZE * PRS_GFT_CAM_LINES_NO_MATCH,
+ ramline.lo);
+ qed_wr(p_hwfn, p_ptt,
+ PRS_REG_GFT_PROFILE_MASK_RAM +
+ RAM_LINE_SIZE * PRS_GFT_CAM_LINES_NO_MATCH + 4,
+ ramline.hi);
}
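The rework above drops the u32-pointer loop because struct gft_ram_line is exactly one 64-bit RAM line exposed as two 32-bit halves: lo at the line address, hi at +4 (i.e. +REG_SIZE). A hypothetical helper capturing the pattern, not part of this patch:

static void example_write_gft_ram_line(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt,
				       u32 addr, struct gft_ram_line *line)
{
	qed_wr(p_hwfn, p_ptt, addr, line->lo);		  /* low 32 bits */
	qed_wr(p_hwfn, p_ptt, addr + REG_SIZE, line->hi); /* high 32 bits */
}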
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
index 4a2e7be5bf72..e3f368882f46 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
@@ -158,6 +158,7 @@ int qed_init_alloc(struct qed_hwfn *p_hwfn)
GFP_KERNEL);
if (!rt_data->init_val) {
kfree(rt_data->b_valid);
+ rt_data->b_valid = NULL;
return -ENOMEM;
}
@@ -167,7 +168,9 @@ int qed_init_alloc(struct qed_hwfn *p_hwfn)
void qed_init_free(struct qed_hwfn *p_hwfn)
{
kfree(p_hwfn->rt_data.init_val);
+ p_hwfn->rt_data.init_val = NULL;
kfree(p_hwfn->rt_data.b_valid);
+ p_hwfn->rt_data.b_valid = NULL;
}
static int qed_init_array_dmae(struct qed_hwfn *p_hwfn,
@@ -525,6 +528,7 @@ int qed_init_run(struct qed_hwfn *p_hwfn,
}
kfree(p_hwfn->unzip_buf);
+ p_hwfn->unzip_buf = NULL;
return rc;
}
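The NULL assignments added in this file make the teardown paths idempotent: kfree(NULL) is a no-op, so clearing each pointer right after freeing it guards against a double-free if cleanup ever runs twice. The idiom in isolation:

	kfree(p_hwfn->unzip_buf);
	p_hwfn->unzip_buf = NULL;	/* a later kfree(NULL) is harmless */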
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
index 40f057edeafc..719cdbfe1695 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -90,6 +90,12 @@ struct aeu_invert_reg_bit {
/* Multiple bits start with this offset */
#define ATTENTION_OFFSET_MASK (0x000ff000)
#define ATTENTION_OFFSET_SHIFT (12)
+
+#define ATTENTION_BB_MASK (0x00700000)
+#define ATTENTION_BB_SHIFT (20)
+#define ATTENTION_BB(value) (value << ATTENTION_BB_SHIFT)
+#define ATTENTION_BB_DIFFERENT BIT(23)
+
unsigned int flags;
/* Callback to call if attention will be triggered */
@@ -105,1215 +111,6 @@ struct aeu_invert_reg {
#define MAX_ATTN_GRPS (8)
#define NUM_ATTN_REGS (9)
-/* HW Attention register */
-struct attn_hw_reg {
- u16 reg_idx; /* Index of this register in its block */
- u16 num_of_bits; /* number of valid attention bits */
- u32 sts_addr; /* Address of the STS register */
- u32 sts_clr_addr; /* Address of the STS_CLR register */
- u32 sts_wr_addr; /* Address of the STS_WR register */
- u32 mask_addr; /* Address of the MASK register */
-};
-
-/* HW block attention registers */
-struct attn_hw_regs {
- u16 num_of_int_regs; /* Number of interrupt regs */
- u16 num_of_prty_regs; /* Number of parity regs */
- struct attn_hw_reg **int_regs; /* interrupt regs */
- struct attn_hw_reg **prty_regs; /* parity regs */
-};
-
-/* HW block attention registers */
-struct attn_hw_block {
- const char *name; /* Block name */
- struct attn_hw_regs chip_regs[1];
-};
-
-static struct attn_hw_reg grc_int0_bb_b0 = {
- 0, 4, 0x50180, 0x5018c, 0x50188, 0x50184};
-
-static struct attn_hw_reg *grc_int_bb_b0_regs[1] = {
- &grc_int0_bb_b0};
-
-static struct attn_hw_reg grc_prty1_bb_b0 = {
- 0, 2, 0x50200, 0x5020c, 0x50208, 0x50204};
-
-static struct attn_hw_reg *grc_prty_bb_b0_regs[1] = {
- &grc_prty1_bb_b0};
-
-static struct attn_hw_reg miscs_int0_bb_b0 = {
- 0, 3, 0x9180, 0x918c, 0x9188, 0x9184};
-
-static struct attn_hw_reg miscs_int1_bb_b0 = {
- 1, 11, 0x9190, 0x919c, 0x9198, 0x9194};
-
-static struct attn_hw_reg *miscs_int_bb_b0_regs[2] = {
- &miscs_int0_bb_b0, &miscs_int1_bb_b0};
-
-static struct attn_hw_reg miscs_prty0_bb_b0 = {
- 0, 1, 0x91a0, 0x91ac, 0x91a8, 0x91a4};
-
-static struct attn_hw_reg *miscs_prty_bb_b0_regs[1] = {
- &miscs_prty0_bb_b0};
-
-static struct attn_hw_reg misc_int0_bb_b0 = {
- 0, 1, 0x8180, 0x818c, 0x8188, 0x8184};
-
-static struct attn_hw_reg *misc_int_bb_b0_regs[1] = {
- &misc_int0_bb_b0};
-
-static struct attn_hw_reg pglue_b_int0_bb_b0 = {
- 0, 23, 0x2a8180, 0x2a818c, 0x2a8188, 0x2a8184};
-
-static struct attn_hw_reg *pglue_b_int_bb_b0_regs[1] = {
- &pglue_b_int0_bb_b0};
-
-static struct attn_hw_reg pglue_b_prty0_bb_b0 = {
- 0, 1, 0x2a8190, 0x2a819c, 0x2a8198, 0x2a8194};
-
-static struct attn_hw_reg pglue_b_prty1_bb_b0 = {
- 1, 22, 0x2a8200, 0x2a820c, 0x2a8208, 0x2a8204};
-
-static struct attn_hw_reg *pglue_b_prty_bb_b0_regs[2] = {
- &pglue_b_prty0_bb_b0, &pglue_b_prty1_bb_b0};
-
-static struct attn_hw_reg cnig_int0_bb_b0 = {
- 0, 6, 0x2182e8, 0x2182f4, 0x2182f0, 0x2182ec};
-
-static struct attn_hw_reg *cnig_int_bb_b0_regs[1] = {
- &cnig_int0_bb_b0};
-
-static struct attn_hw_reg cnig_prty0_bb_b0 = {
- 0, 2, 0x218348, 0x218354, 0x218350, 0x21834c};
-
-static struct attn_hw_reg *cnig_prty_bb_b0_regs[1] = {
- &cnig_prty0_bb_b0};
-
-static struct attn_hw_reg cpmu_int0_bb_b0 = {
- 0, 1, 0x303e0, 0x303ec, 0x303e8, 0x303e4};
-
-static struct attn_hw_reg *cpmu_int_bb_b0_regs[1] = {
- &cpmu_int0_bb_b0};
-
-static struct attn_hw_reg ncsi_int0_bb_b0 = {
- 0, 1, 0x404cc, 0x404d8, 0x404d4, 0x404d0};
-
-static struct attn_hw_reg *ncsi_int_bb_b0_regs[1] = {
- &ncsi_int0_bb_b0};
-
-static struct attn_hw_reg ncsi_prty1_bb_b0 = {
- 0, 1, 0x40000, 0x4000c, 0x40008, 0x40004};
-
-static struct attn_hw_reg *ncsi_prty_bb_b0_regs[1] = {
- &ncsi_prty1_bb_b0};
-
-static struct attn_hw_reg opte_prty1_bb_b0 = {
- 0, 11, 0x53000, 0x5300c, 0x53008, 0x53004};
-
-static struct attn_hw_reg opte_prty0_bb_b0 = {
- 1, 1, 0x53208, 0x53214, 0x53210, 0x5320c};
-
-static struct attn_hw_reg *opte_prty_bb_b0_regs[2] = {
- &opte_prty1_bb_b0, &opte_prty0_bb_b0};
-
-static struct attn_hw_reg bmb_int0_bb_b0 = {
- 0, 16, 0x5400c0, 0x5400cc, 0x5400c8, 0x5400c4};
-
-static struct attn_hw_reg bmb_int1_bb_b0 = {
- 1, 28, 0x5400d8, 0x5400e4, 0x5400e0, 0x5400dc};
-
-static struct attn_hw_reg bmb_int2_bb_b0 = {
- 2, 26, 0x5400f0, 0x5400fc, 0x5400f8, 0x5400f4};
-
-static struct attn_hw_reg bmb_int3_bb_b0 = {
- 3, 31, 0x540108, 0x540114, 0x540110, 0x54010c};
-
-static struct attn_hw_reg bmb_int4_bb_b0 = {
- 4, 27, 0x540120, 0x54012c, 0x540128, 0x540124};
-
-static struct attn_hw_reg bmb_int5_bb_b0 = {
- 5, 29, 0x540138, 0x540144, 0x540140, 0x54013c};
-
-static struct attn_hw_reg bmb_int6_bb_b0 = {
- 6, 30, 0x540150, 0x54015c, 0x540158, 0x540154};
-
-static struct attn_hw_reg bmb_int7_bb_b0 = {
- 7, 32, 0x540168, 0x540174, 0x540170, 0x54016c};
-
-static struct attn_hw_reg bmb_int8_bb_b0 = {
- 8, 32, 0x540184, 0x540190, 0x54018c, 0x540188};
-
-static struct attn_hw_reg bmb_int9_bb_b0 = {
- 9, 32, 0x54019c, 0x5401a8, 0x5401a4, 0x5401a0};
-
-static struct attn_hw_reg bmb_int10_bb_b0 = {
- 10, 3, 0x5401b4, 0x5401c0, 0x5401bc, 0x5401b8};
-
-static struct attn_hw_reg bmb_int11_bb_b0 = {
- 11, 4, 0x5401cc, 0x5401d8, 0x5401d4, 0x5401d0};
-
-static struct attn_hw_reg *bmb_int_bb_b0_regs[12] = {
- &bmb_int0_bb_b0, &bmb_int1_bb_b0, &bmb_int2_bb_b0, &bmb_int3_bb_b0,
- &bmb_int4_bb_b0, &bmb_int5_bb_b0, &bmb_int6_bb_b0, &bmb_int7_bb_b0,
- &bmb_int8_bb_b0, &bmb_int9_bb_b0, &bmb_int10_bb_b0, &bmb_int11_bb_b0};
-
-static struct attn_hw_reg bmb_prty0_bb_b0 = {
- 0, 5, 0x5401dc, 0x5401e8, 0x5401e4, 0x5401e0};
-
-static struct attn_hw_reg bmb_prty1_bb_b0 = {
- 1, 31, 0x540400, 0x54040c, 0x540408, 0x540404};
-
-static struct attn_hw_reg bmb_prty2_bb_b0 = {
- 2, 15, 0x540410, 0x54041c, 0x540418, 0x540414};
-
-static struct attn_hw_reg *bmb_prty_bb_b0_regs[3] = {
- &bmb_prty0_bb_b0, &bmb_prty1_bb_b0, &bmb_prty2_bb_b0};
-
-static struct attn_hw_reg pcie_prty1_bb_b0 = {
- 0, 17, 0x54000, 0x5400c, 0x54008, 0x54004};
-
-static struct attn_hw_reg *pcie_prty_bb_b0_regs[1] = {
- &pcie_prty1_bb_b0};
-
-static struct attn_hw_reg mcp2_prty0_bb_b0 = {
- 0, 1, 0x52040, 0x5204c, 0x52048, 0x52044};
-
-static struct attn_hw_reg mcp2_prty1_bb_b0 = {
- 1, 12, 0x52204, 0x52210, 0x5220c, 0x52208};
-
-static struct attn_hw_reg *mcp2_prty_bb_b0_regs[2] = {
- &mcp2_prty0_bb_b0, &mcp2_prty1_bb_b0};
-
-static struct attn_hw_reg pswhst_int0_bb_b0 = {
- 0, 18, 0x2a0180, 0x2a018c, 0x2a0188, 0x2a0184};
-
-static struct attn_hw_reg *pswhst_int_bb_b0_regs[1] = {
- &pswhst_int0_bb_b0};
-
-static struct attn_hw_reg pswhst_prty0_bb_b0 = {
- 0, 1, 0x2a0190, 0x2a019c, 0x2a0198, 0x2a0194};
-
-static struct attn_hw_reg pswhst_prty1_bb_b0 = {
- 1, 17, 0x2a0200, 0x2a020c, 0x2a0208, 0x2a0204};
-
-static struct attn_hw_reg *pswhst_prty_bb_b0_regs[2] = {
- &pswhst_prty0_bb_b0, &pswhst_prty1_bb_b0};
-
-static struct attn_hw_reg pswhst2_int0_bb_b0 = {
- 0, 5, 0x29e180, 0x29e18c, 0x29e188, 0x29e184};
-
-static struct attn_hw_reg *pswhst2_int_bb_b0_regs[1] = {
- &pswhst2_int0_bb_b0};
-
-static struct attn_hw_reg pswhst2_prty0_bb_b0 = {
- 0, 1, 0x29e190, 0x29e19c, 0x29e198, 0x29e194};
-
-static struct attn_hw_reg *pswhst2_prty_bb_b0_regs[1] = {
- &pswhst2_prty0_bb_b0};
-
-static struct attn_hw_reg pswrd_int0_bb_b0 = {
- 0, 3, 0x29c180, 0x29c18c, 0x29c188, 0x29c184};
-
-static struct attn_hw_reg *pswrd_int_bb_b0_regs[1] = {
- &pswrd_int0_bb_b0};
-
-static struct attn_hw_reg pswrd_prty0_bb_b0 = {
- 0, 1, 0x29c190, 0x29c19c, 0x29c198, 0x29c194};
-
-static struct attn_hw_reg *pswrd_prty_bb_b0_regs[1] = {
- &pswrd_prty0_bb_b0};
-
-static struct attn_hw_reg pswrd2_int0_bb_b0 = {
- 0, 5, 0x29d180, 0x29d18c, 0x29d188, 0x29d184};
-
-static struct attn_hw_reg *pswrd2_int_bb_b0_regs[1] = {
- &pswrd2_int0_bb_b0};
-
-static struct attn_hw_reg pswrd2_prty0_bb_b0 = {
- 0, 1, 0x29d190, 0x29d19c, 0x29d198, 0x29d194};
-
-static struct attn_hw_reg pswrd2_prty1_bb_b0 = {
- 1, 31, 0x29d200, 0x29d20c, 0x29d208, 0x29d204};
-
-static struct attn_hw_reg pswrd2_prty2_bb_b0 = {
- 2, 3, 0x29d210, 0x29d21c, 0x29d218, 0x29d214};
-
-static struct attn_hw_reg *pswrd2_prty_bb_b0_regs[3] = {
- &pswrd2_prty0_bb_b0, &pswrd2_prty1_bb_b0, &pswrd2_prty2_bb_b0};
-
-static struct attn_hw_reg pswwr_int0_bb_b0 = {
- 0, 16, 0x29a180, 0x29a18c, 0x29a188, 0x29a184};
-
-static struct attn_hw_reg *pswwr_int_bb_b0_regs[1] = {
- &pswwr_int0_bb_b0};
-
-static struct attn_hw_reg pswwr_prty0_bb_b0 = {
- 0, 1, 0x29a190, 0x29a19c, 0x29a198, 0x29a194};
-
-static struct attn_hw_reg *pswwr_prty_bb_b0_regs[1] = {
- &pswwr_prty0_bb_b0};
-
-static struct attn_hw_reg pswwr2_int0_bb_b0 = {
- 0, 19, 0x29b180, 0x29b18c, 0x29b188, 0x29b184};
-
-static struct attn_hw_reg *pswwr2_int_bb_b0_regs[1] = {
- &pswwr2_int0_bb_b0};
-
-static struct attn_hw_reg pswwr2_prty0_bb_b0 = {
- 0, 1, 0x29b190, 0x29b19c, 0x29b198, 0x29b194};
-
-static struct attn_hw_reg pswwr2_prty1_bb_b0 = {
- 1, 31, 0x29b200, 0x29b20c, 0x29b208, 0x29b204};
-
-static struct attn_hw_reg pswwr2_prty2_bb_b0 = {
- 2, 31, 0x29b210, 0x29b21c, 0x29b218, 0x29b214};
-
-static struct attn_hw_reg pswwr2_prty3_bb_b0 = {
- 3, 31, 0x29b220, 0x29b22c, 0x29b228, 0x29b224};
-
-static struct attn_hw_reg pswwr2_prty4_bb_b0 = {
- 4, 20, 0x29b230, 0x29b23c, 0x29b238, 0x29b234};
-
-static struct attn_hw_reg *pswwr2_prty_bb_b0_regs[5] = {
- &pswwr2_prty0_bb_b0, &pswwr2_prty1_bb_b0, &pswwr2_prty2_bb_b0,
- &pswwr2_prty3_bb_b0, &pswwr2_prty4_bb_b0};
-
-static struct attn_hw_reg pswrq_int0_bb_b0 = {
- 0, 21, 0x280180, 0x28018c, 0x280188, 0x280184};
-
-static struct attn_hw_reg *pswrq_int_bb_b0_regs[1] = {
- &pswrq_int0_bb_b0};
-
-static struct attn_hw_reg pswrq_prty0_bb_b0 = {
- 0, 1, 0x280190, 0x28019c, 0x280198, 0x280194};
-
-static struct attn_hw_reg *pswrq_prty_bb_b0_regs[1] = {
- &pswrq_prty0_bb_b0};
-
-static struct attn_hw_reg pswrq2_int0_bb_b0 = {
- 0, 15, 0x240180, 0x24018c, 0x240188, 0x240184};
-
-static struct attn_hw_reg *pswrq2_int_bb_b0_regs[1] = {
- &pswrq2_int0_bb_b0};
-
-static struct attn_hw_reg pswrq2_prty1_bb_b0 = {
- 0, 9, 0x240200, 0x24020c, 0x240208, 0x240204};
-
-static struct attn_hw_reg *pswrq2_prty_bb_b0_regs[1] = {
- &pswrq2_prty1_bb_b0};
-
-static struct attn_hw_reg pglcs_int0_bb_b0 = {
- 0, 1, 0x1d00, 0x1d0c, 0x1d08, 0x1d04};
-
-static struct attn_hw_reg *pglcs_int_bb_b0_regs[1] = {
- &pglcs_int0_bb_b0};
-
-static struct attn_hw_reg dmae_int0_bb_b0 = {
- 0, 2, 0xc180, 0xc18c, 0xc188, 0xc184};
-
-static struct attn_hw_reg *dmae_int_bb_b0_regs[1] = {
- &dmae_int0_bb_b0};
-
-static struct attn_hw_reg dmae_prty1_bb_b0 = {
- 0, 3, 0xc200, 0xc20c, 0xc208, 0xc204};
-
-static struct attn_hw_reg *dmae_prty_bb_b0_regs[1] = {
- &dmae_prty1_bb_b0};
-
-static struct attn_hw_reg ptu_int0_bb_b0 = {
- 0, 8, 0x560180, 0x56018c, 0x560188, 0x560184};
-
-static struct attn_hw_reg *ptu_int_bb_b0_regs[1] = {
- &ptu_int0_bb_b0};
-
-static struct attn_hw_reg ptu_prty1_bb_b0 = {
- 0, 18, 0x560200, 0x56020c, 0x560208, 0x560204};
-
-static struct attn_hw_reg *ptu_prty_bb_b0_regs[1] = {
- &ptu_prty1_bb_b0};
-
-static struct attn_hw_reg tcm_int0_bb_b0 = {
- 0, 8, 0x1180180, 0x118018c, 0x1180188, 0x1180184};
-
-static struct attn_hw_reg tcm_int1_bb_b0 = {
- 1, 32, 0x1180190, 0x118019c, 0x1180198, 0x1180194};
-
-static struct attn_hw_reg tcm_int2_bb_b0 = {
- 2, 1, 0x11801a0, 0x11801ac, 0x11801a8, 0x11801a4};
-
-static struct attn_hw_reg *tcm_int_bb_b0_regs[3] = {
- &tcm_int0_bb_b0, &tcm_int1_bb_b0, &tcm_int2_bb_b0};
-
-static struct attn_hw_reg tcm_prty1_bb_b0 = {
- 0, 31, 0x1180200, 0x118020c, 0x1180208, 0x1180204};
-
-static struct attn_hw_reg tcm_prty2_bb_b0 = {
- 1, 2, 0x1180210, 0x118021c, 0x1180218, 0x1180214};
-
-static struct attn_hw_reg *tcm_prty_bb_b0_regs[2] = {
- &tcm_prty1_bb_b0, &tcm_prty2_bb_b0};
-
-static struct attn_hw_reg mcm_int0_bb_b0 = {
- 0, 14, 0x1200180, 0x120018c, 0x1200188, 0x1200184};
-
-static struct attn_hw_reg mcm_int1_bb_b0 = {
- 1, 26, 0x1200190, 0x120019c, 0x1200198, 0x1200194};
-
-static struct attn_hw_reg mcm_int2_bb_b0 = {
- 2, 1, 0x12001a0, 0x12001ac, 0x12001a8, 0x12001a4};
-
-static struct attn_hw_reg *mcm_int_bb_b0_regs[3] = {
- &mcm_int0_bb_b0, &mcm_int1_bb_b0, &mcm_int2_bb_b0};
-
-static struct attn_hw_reg mcm_prty1_bb_b0 = {
- 0, 31, 0x1200200, 0x120020c, 0x1200208, 0x1200204};
-
-static struct attn_hw_reg mcm_prty2_bb_b0 = {
- 1, 4, 0x1200210, 0x120021c, 0x1200218, 0x1200214};
-
-static struct attn_hw_reg *mcm_prty_bb_b0_regs[2] = {
- &mcm_prty1_bb_b0, &mcm_prty2_bb_b0};
-
-static struct attn_hw_reg ucm_int0_bb_b0 = {
- 0, 17, 0x1280180, 0x128018c, 0x1280188, 0x1280184};
-
-static struct attn_hw_reg ucm_int1_bb_b0 = {
- 1, 29, 0x1280190, 0x128019c, 0x1280198, 0x1280194};
-
-static struct attn_hw_reg ucm_int2_bb_b0 = {
- 2, 1, 0x12801a0, 0x12801ac, 0x12801a8, 0x12801a4};
-
-static struct attn_hw_reg *ucm_int_bb_b0_regs[3] = {
- &ucm_int0_bb_b0, &ucm_int1_bb_b0, &ucm_int2_bb_b0};
-
-static struct attn_hw_reg ucm_prty1_bb_b0 = {
- 0, 31, 0x1280200, 0x128020c, 0x1280208, 0x1280204};
-
-static struct attn_hw_reg ucm_prty2_bb_b0 = {
- 1, 7, 0x1280210, 0x128021c, 0x1280218, 0x1280214};
-
-static struct attn_hw_reg *ucm_prty_bb_b0_regs[2] = {
- &ucm_prty1_bb_b0, &ucm_prty2_bb_b0};
-
-static struct attn_hw_reg xcm_int0_bb_b0 = {
- 0, 16, 0x1000180, 0x100018c, 0x1000188, 0x1000184};
-
-static struct attn_hw_reg xcm_int1_bb_b0 = {
- 1, 25, 0x1000190, 0x100019c, 0x1000198, 0x1000194};
-
-static struct attn_hw_reg xcm_int2_bb_b0 = {
- 2, 8, 0x10001a0, 0x10001ac, 0x10001a8, 0x10001a4};
-
-static struct attn_hw_reg *xcm_int_bb_b0_regs[3] = {
- &xcm_int0_bb_b0, &xcm_int1_bb_b0, &xcm_int2_bb_b0};
-
-static struct attn_hw_reg xcm_prty1_bb_b0 = {
- 0, 31, 0x1000200, 0x100020c, 0x1000208, 0x1000204};
-
-static struct attn_hw_reg xcm_prty2_bb_b0 = {
- 1, 11, 0x1000210, 0x100021c, 0x1000218, 0x1000214};
-
-static struct attn_hw_reg *xcm_prty_bb_b0_regs[2] = {
- &xcm_prty1_bb_b0, &xcm_prty2_bb_b0};
-
-static struct attn_hw_reg ycm_int0_bb_b0 = {
- 0, 13, 0x1080180, 0x108018c, 0x1080188, 0x1080184};
-
-static struct attn_hw_reg ycm_int1_bb_b0 = {
- 1, 23, 0x1080190, 0x108019c, 0x1080198, 0x1080194};
-
-static struct attn_hw_reg ycm_int2_bb_b0 = {
- 2, 1, 0x10801a0, 0x10801ac, 0x10801a8, 0x10801a4};
-
-static struct attn_hw_reg *ycm_int_bb_b0_regs[3] = {
- &ycm_int0_bb_b0, &ycm_int1_bb_b0, &ycm_int2_bb_b0};
-
-static struct attn_hw_reg ycm_prty1_bb_b0 = {
- 0, 31, 0x1080200, 0x108020c, 0x1080208, 0x1080204};
-
-static struct attn_hw_reg ycm_prty2_bb_b0 = {
- 1, 3, 0x1080210, 0x108021c, 0x1080218, 0x1080214};
-
-static struct attn_hw_reg *ycm_prty_bb_b0_regs[2] = {
- &ycm_prty1_bb_b0, &ycm_prty2_bb_b0};
-
-static struct attn_hw_reg pcm_int0_bb_b0 = {
- 0, 5, 0x1100180, 0x110018c, 0x1100188, 0x1100184};
-
-static struct attn_hw_reg pcm_int1_bb_b0 = {
- 1, 14, 0x1100190, 0x110019c, 0x1100198, 0x1100194};
-
-static struct attn_hw_reg pcm_int2_bb_b0 = {
- 2, 1, 0x11001a0, 0x11001ac, 0x11001a8, 0x11001a4};
-
-static struct attn_hw_reg *pcm_int_bb_b0_regs[3] = {
- &pcm_int0_bb_b0, &pcm_int1_bb_b0, &pcm_int2_bb_b0};
-
-static struct attn_hw_reg pcm_prty1_bb_b0 = {
- 0, 11, 0x1100200, 0x110020c, 0x1100208, 0x1100204};
-
-static struct attn_hw_reg *pcm_prty_bb_b0_regs[1] = {
- &pcm_prty1_bb_b0};
-
-static struct attn_hw_reg qm_int0_bb_b0 = {
- 0, 22, 0x2f0180, 0x2f018c, 0x2f0188, 0x2f0184};
-
-static struct attn_hw_reg *qm_int_bb_b0_regs[1] = {
- &qm_int0_bb_b0};
-
-static struct attn_hw_reg qm_prty0_bb_b0 = {
- 0, 11, 0x2f0190, 0x2f019c, 0x2f0198, 0x2f0194};
-
-static struct attn_hw_reg qm_prty1_bb_b0 = {
- 1, 31, 0x2f0200, 0x2f020c, 0x2f0208, 0x2f0204};
-
-static struct attn_hw_reg qm_prty2_bb_b0 = {
- 2, 31, 0x2f0210, 0x2f021c, 0x2f0218, 0x2f0214};
-
-static struct attn_hw_reg qm_prty3_bb_b0 = {
- 3, 11, 0x2f0220, 0x2f022c, 0x2f0228, 0x2f0224};
-
-static struct attn_hw_reg *qm_prty_bb_b0_regs[4] = {
- &qm_prty0_bb_b0, &qm_prty1_bb_b0, &qm_prty2_bb_b0, &qm_prty3_bb_b0};
-
-static struct attn_hw_reg tm_int0_bb_b0 = {
- 0, 32, 0x2c0180, 0x2c018c, 0x2c0188, 0x2c0184};
-
-static struct attn_hw_reg tm_int1_bb_b0 = {
- 1, 11, 0x2c0190, 0x2c019c, 0x2c0198, 0x2c0194};
-
-static struct attn_hw_reg *tm_int_bb_b0_regs[2] = {
- &tm_int0_bb_b0, &tm_int1_bb_b0};
-
-static struct attn_hw_reg tm_prty1_bb_b0 = {
- 0, 17, 0x2c0200, 0x2c020c, 0x2c0208, 0x2c0204};
-
-static struct attn_hw_reg *tm_prty_bb_b0_regs[1] = {
- &tm_prty1_bb_b0};
-
-static struct attn_hw_reg dorq_int0_bb_b0 = {
- 0, 9, 0x100180, 0x10018c, 0x100188, 0x100184};
-
-static struct attn_hw_reg *dorq_int_bb_b0_regs[1] = {
- &dorq_int0_bb_b0};
-
-static struct attn_hw_reg dorq_prty0_bb_b0 = {
- 0, 1, 0x100190, 0x10019c, 0x100198, 0x100194};
-
-static struct attn_hw_reg dorq_prty1_bb_b0 = {
- 1, 6, 0x100200, 0x10020c, 0x100208, 0x100204};
-
-static struct attn_hw_reg *dorq_prty_bb_b0_regs[2] = {
- &dorq_prty0_bb_b0, &dorq_prty1_bb_b0};
-
-static struct attn_hw_reg brb_int0_bb_b0 = {
- 0, 32, 0x3400c0, 0x3400cc, 0x3400c8, 0x3400c4};
-
-static struct attn_hw_reg brb_int1_bb_b0 = {
- 1, 30, 0x3400d8, 0x3400e4, 0x3400e0, 0x3400dc};
-
-static struct attn_hw_reg brb_int2_bb_b0 = {
- 2, 28, 0x3400f0, 0x3400fc, 0x3400f8, 0x3400f4};
-
-static struct attn_hw_reg brb_int3_bb_b0 = {
- 3, 31, 0x340108, 0x340114, 0x340110, 0x34010c};
-
-static struct attn_hw_reg brb_int4_bb_b0 = {
- 4, 27, 0x340120, 0x34012c, 0x340128, 0x340124};
-
-static struct attn_hw_reg brb_int5_bb_b0 = {
- 5, 1, 0x340138, 0x340144, 0x340140, 0x34013c};
-
-static struct attn_hw_reg brb_int6_bb_b0 = {
- 6, 8, 0x340150, 0x34015c, 0x340158, 0x340154};
-
-static struct attn_hw_reg brb_int7_bb_b0 = {
- 7, 32, 0x340168, 0x340174, 0x340170, 0x34016c};
-
-static struct attn_hw_reg brb_int8_bb_b0 = {
- 8, 17, 0x340184, 0x340190, 0x34018c, 0x340188};
-
-static struct attn_hw_reg brb_int9_bb_b0 = {
- 9, 1, 0x34019c, 0x3401a8, 0x3401a4, 0x3401a0};
-
-static struct attn_hw_reg brb_int10_bb_b0 = {
- 10, 14, 0x3401b4, 0x3401c0, 0x3401bc, 0x3401b8};
-
-static struct attn_hw_reg brb_int11_bb_b0 = {
- 11, 8, 0x3401cc, 0x3401d8, 0x3401d4, 0x3401d0};
-
-static struct attn_hw_reg *brb_int_bb_b0_regs[12] = {
- &brb_int0_bb_b0, &brb_int1_bb_b0, &brb_int2_bb_b0, &brb_int3_bb_b0,
- &brb_int4_bb_b0, &brb_int5_bb_b0, &brb_int6_bb_b0, &brb_int7_bb_b0,
- &brb_int8_bb_b0, &brb_int9_bb_b0, &brb_int10_bb_b0, &brb_int11_bb_b0};
-
-static struct attn_hw_reg brb_prty0_bb_b0 = {
- 0, 5, 0x3401dc, 0x3401e8, 0x3401e4, 0x3401e0};
-
-static struct attn_hw_reg brb_prty1_bb_b0 = {
- 1, 31, 0x340400, 0x34040c, 0x340408, 0x340404};
-
-static struct attn_hw_reg brb_prty2_bb_b0 = {
- 2, 14, 0x340410, 0x34041c, 0x340418, 0x340414};
-
-static struct attn_hw_reg *brb_prty_bb_b0_regs[3] = {
- &brb_prty0_bb_b0, &brb_prty1_bb_b0, &brb_prty2_bb_b0};
-
-static struct attn_hw_reg src_int0_bb_b0 = {
- 0, 1, 0x2381d8, 0x2381dc, 0x2381e0, 0x2381e4};
-
-static struct attn_hw_reg *src_int_bb_b0_regs[1] = {
- &src_int0_bb_b0};
-
-static struct attn_hw_reg prs_int0_bb_b0 = {
- 0, 2, 0x1f0040, 0x1f004c, 0x1f0048, 0x1f0044};
-
-static struct attn_hw_reg *prs_int_bb_b0_regs[1] = {
- &prs_int0_bb_b0};
-
-static struct attn_hw_reg prs_prty0_bb_b0 = {
- 0, 2, 0x1f0050, 0x1f005c, 0x1f0058, 0x1f0054};
-
-static struct attn_hw_reg prs_prty1_bb_b0 = {
- 1, 31, 0x1f0204, 0x1f0210, 0x1f020c, 0x1f0208};
-
-static struct attn_hw_reg prs_prty2_bb_b0 = {
- 2, 5, 0x1f0214, 0x1f0220, 0x1f021c, 0x1f0218};
-
-static struct attn_hw_reg *prs_prty_bb_b0_regs[3] = {
- &prs_prty0_bb_b0, &prs_prty1_bb_b0, &prs_prty2_bb_b0};
-
-static struct attn_hw_reg tsdm_int0_bb_b0 = {
- 0, 26, 0xfb0040, 0xfb004c, 0xfb0048, 0xfb0044};
-
-static struct attn_hw_reg *tsdm_int_bb_b0_regs[1] = {
- &tsdm_int0_bb_b0};
-
-static struct attn_hw_reg tsdm_prty1_bb_b0 = {
- 0, 10, 0xfb0200, 0xfb020c, 0xfb0208, 0xfb0204};
-
-static struct attn_hw_reg *tsdm_prty_bb_b0_regs[1] = {
- &tsdm_prty1_bb_b0};
-
-static struct attn_hw_reg msdm_int0_bb_b0 = {
- 0, 26, 0xfc0040, 0xfc004c, 0xfc0048, 0xfc0044};
-
-static struct attn_hw_reg *msdm_int_bb_b0_regs[1] = {
- &msdm_int0_bb_b0};
-
-static struct attn_hw_reg msdm_prty1_bb_b0 = {
- 0, 11, 0xfc0200, 0xfc020c, 0xfc0208, 0xfc0204};
-
-static struct attn_hw_reg *msdm_prty_bb_b0_regs[1] = {
- &msdm_prty1_bb_b0};
-
-static struct attn_hw_reg usdm_int0_bb_b0 = {
- 0, 26, 0xfd0040, 0xfd004c, 0xfd0048, 0xfd0044};
-
-static struct attn_hw_reg *usdm_int_bb_b0_regs[1] = {
- &usdm_int0_bb_b0};
-
-static struct attn_hw_reg usdm_prty1_bb_b0 = {
- 0, 10, 0xfd0200, 0xfd020c, 0xfd0208, 0xfd0204};
-
-static struct attn_hw_reg *usdm_prty_bb_b0_regs[1] = {
- &usdm_prty1_bb_b0};
-
-static struct attn_hw_reg xsdm_int0_bb_b0 = {
- 0, 26, 0xf80040, 0xf8004c, 0xf80048, 0xf80044};
-
-static struct attn_hw_reg *xsdm_int_bb_b0_regs[1] = {
- &xsdm_int0_bb_b0};
-
-static struct attn_hw_reg xsdm_prty1_bb_b0 = {
- 0, 10, 0xf80200, 0xf8020c, 0xf80208, 0xf80204};
-
-static struct attn_hw_reg *xsdm_prty_bb_b0_regs[1] = {
- &xsdm_prty1_bb_b0};
-
-static struct attn_hw_reg ysdm_int0_bb_b0 = {
- 0, 26, 0xf90040, 0xf9004c, 0xf90048, 0xf90044};
-
-static struct attn_hw_reg *ysdm_int_bb_b0_regs[1] = {
- &ysdm_int0_bb_b0};
-
-static struct attn_hw_reg ysdm_prty1_bb_b0 = {
- 0, 9, 0xf90200, 0xf9020c, 0xf90208, 0xf90204};
-
-static struct attn_hw_reg *ysdm_prty_bb_b0_regs[1] = {
- &ysdm_prty1_bb_b0};
-
-static struct attn_hw_reg psdm_int0_bb_b0 = {
- 0, 26, 0xfa0040, 0xfa004c, 0xfa0048, 0xfa0044};
-
-static struct attn_hw_reg *psdm_int_bb_b0_regs[1] = {
- &psdm_int0_bb_b0};
-
-static struct attn_hw_reg psdm_prty1_bb_b0 = {
- 0, 9, 0xfa0200, 0xfa020c, 0xfa0208, 0xfa0204};
-
-static struct attn_hw_reg *psdm_prty_bb_b0_regs[1] = {
- &psdm_prty1_bb_b0};
-
-static struct attn_hw_reg tsem_int0_bb_b0 = {
- 0, 32, 0x1700040, 0x170004c, 0x1700048, 0x1700044};
-
-static struct attn_hw_reg tsem_int1_bb_b0 = {
- 1, 13, 0x1700050, 0x170005c, 0x1700058, 0x1700054};
-
-static struct attn_hw_reg tsem_fast_memory_int0_bb_b0 = {
- 2, 1, 0x1740040, 0x174004c, 0x1740048, 0x1740044};
-
-static struct attn_hw_reg *tsem_int_bb_b0_regs[3] = {
- &tsem_int0_bb_b0, &tsem_int1_bb_b0, &tsem_fast_memory_int0_bb_b0};
-
-static struct attn_hw_reg tsem_prty0_bb_b0 = {
- 0, 3, 0x17000c8, 0x17000d4, 0x17000d0, 0x17000cc};
-
-static struct attn_hw_reg tsem_prty1_bb_b0 = {
- 1, 6, 0x1700200, 0x170020c, 0x1700208, 0x1700204};
-
-static struct attn_hw_reg tsem_fast_memory_vfc_config_prty1_bb_b0 = {
- 2, 6, 0x174a200, 0x174a20c, 0x174a208, 0x174a204};
-
-static struct attn_hw_reg *tsem_prty_bb_b0_regs[3] = {
- &tsem_prty0_bb_b0, &tsem_prty1_bb_b0,
- &tsem_fast_memory_vfc_config_prty1_bb_b0};
-
-static struct attn_hw_reg msem_int0_bb_b0 = {
- 0, 32, 0x1800040, 0x180004c, 0x1800048, 0x1800044};
-
-static struct attn_hw_reg msem_int1_bb_b0 = {
- 1, 13, 0x1800050, 0x180005c, 0x1800058, 0x1800054};
-
-static struct attn_hw_reg msem_fast_memory_int0_bb_b0 = {
- 2, 1, 0x1840040, 0x184004c, 0x1840048, 0x1840044};
-
-static struct attn_hw_reg *msem_int_bb_b0_regs[3] = {
- &msem_int0_bb_b0, &msem_int1_bb_b0, &msem_fast_memory_int0_bb_b0};
-
-static struct attn_hw_reg msem_prty0_bb_b0 = {
- 0, 3, 0x18000c8, 0x18000d4, 0x18000d0, 0x18000cc};
-
-static struct attn_hw_reg msem_prty1_bb_b0 = {
- 1, 6, 0x1800200, 0x180020c, 0x1800208, 0x1800204};
-
-static struct attn_hw_reg *msem_prty_bb_b0_regs[2] = {
- &msem_prty0_bb_b0, &msem_prty1_bb_b0};
-
-static struct attn_hw_reg usem_int0_bb_b0 = {
- 0, 32, 0x1900040, 0x190004c, 0x1900048, 0x1900044};
-
-static struct attn_hw_reg usem_int1_bb_b0 = {
- 1, 13, 0x1900050, 0x190005c, 0x1900058, 0x1900054};
-
-static struct attn_hw_reg usem_fast_memory_int0_bb_b0 = {
- 2, 1, 0x1940040, 0x194004c, 0x1940048, 0x1940044};
-
-static struct attn_hw_reg *usem_int_bb_b0_regs[3] = {
- &usem_int0_bb_b0, &usem_int1_bb_b0, &usem_fast_memory_int0_bb_b0};
-
-static struct attn_hw_reg usem_prty0_bb_b0 = {
- 0, 3, 0x19000c8, 0x19000d4, 0x19000d0, 0x19000cc};
-
-static struct attn_hw_reg usem_prty1_bb_b0 = {
- 1, 6, 0x1900200, 0x190020c, 0x1900208, 0x1900204};
-
-static struct attn_hw_reg *usem_prty_bb_b0_regs[2] = {
- &usem_prty0_bb_b0, &usem_prty1_bb_b0};
-
-static struct attn_hw_reg xsem_int0_bb_b0 = {
- 0, 32, 0x1400040, 0x140004c, 0x1400048, 0x1400044};
-
-static struct attn_hw_reg xsem_int1_bb_b0 = {
- 1, 13, 0x1400050, 0x140005c, 0x1400058, 0x1400054};
-
-static struct attn_hw_reg xsem_fast_memory_int0_bb_b0 = {
- 2, 1, 0x1440040, 0x144004c, 0x1440048, 0x1440044};
-
-static struct attn_hw_reg *xsem_int_bb_b0_regs[3] = {
- &xsem_int0_bb_b0, &xsem_int1_bb_b0, &xsem_fast_memory_int0_bb_b0};
-
-static struct attn_hw_reg xsem_prty0_bb_b0 = {
- 0, 3, 0x14000c8, 0x14000d4, 0x14000d0, 0x14000cc};
-
-static struct attn_hw_reg xsem_prty1_bb_b0 = {
- 1, 7, 0x1400200, 0x140020c, 0x1400208, 0x1400204};
-
-static struct attn_hw_reg *xsem_prty_bb_b0_regs[2] = {
- &xsem_prty0_bb_b0, &xsem_prty1_bb_b0};
-
-static struct attn_hw_reg ysem_int0_bb_b0 = {
- 0, 32, 0x1500040, 0x150004c, 0x1500048, 0x1500044};
-
-static struct attn_hw_reg ysem_int1_bb_b0 = {
- 1, 13, 0x1500050, 0x150005c, 0x1500058, 0x1500054};
-
-static struct attn_hw_reg ysem_fast_memory_int0_bb_b0 = {
- 2, 1, 0x1540040, 0x154004c, 0x1540048, 0x1540044};
-
-static struct attn_hw_reg *ysem_int_bb_b0_regs[3] = {
- &ysem_int0_bb_b0, &ysem_int1_bb_b0, &ysem_fast_memory_int0_bb_b0};
-
-static struct attn_hw_reg ysem_prty0_bb_b0 = {
- 0, 3, 0x15000c8, 0x15000d4, 0x15000d0, 0x15000cc};
-
-static struct attn_hw_reg ysem_prty1_bb_b0 = {
- 1, 7, 0x1500200, 0x150020c, 0x1500208, 0x1500204};
-
-static struct attn_hw_reg *ysem_prty_bb_b0_regs[2] = {
- &ysem_prty0_bb_b0, &ysem_prty1_bb_b0};
-
-static struct attn_hw_reg psem_int0_bb_b0 = {
- 0, 32, 0x1600040, 0x160004c, 0x1600048, 0x1600044};
-
-static struct attn_hw_reg psem_int1_bb_b0 = {
- 1, 13, 0x1600050, 0x160005c, 0x1600058, 0x1600054};
-
-static struct attn_hw_reg psem_fast_memory_int0_bb_b0 = {
- 2, 1, 0x1640040, 0x164004c, 0x1640048, 0x1640044};
-
-static struct attn_hw_reg *psem_int_bb_b0_regs[3] = {
- &psem_int0_bb_b0, &psem_int1_bb_b0, &psem_fast_memory_int0_bb_b0};
-
-static struct attn_hw_reg psem_prty0_bb_b0 = {
- 0, 3, 0x16000c8, 0x16000d4, 0x16000d0, 0x16000cc};
-
-static struct attn_hw_reg psem_prty1_bb_b0 = {
- 1, 6, 0x1600200, 0x160020c, 0x1600208, 0x1600204};
-
-static struct attn_hw_reg psem_fast_memory_vfc_config_prty1_bb_b0 = {
- 2, 6, 0x164a200, 0x164a20c, 0x164a208, 0x164a204};
-
-static struct attn_hw_reg *psem_prty_bb_b0_regs[3] = {
- &psem_prty0_bb_b0, &psem_prty1_bb_b0,
- &psem_fast_memory_vfc_config_prty1_bb_b0};
-
-static struct attn_hw_reg rss_int0_bb_b0 = {
- 0, 12, 0x238980, 0x23898c, 0x238988, 0x238984};
-
-static struct attn_hw_reg *rss_int_bb_b0_regs[1] = {
- &rss_int0_bb_b0};
-
-static struct attn_hw_reg rss_prty1_bb_b0 = {
- 0, 4, 0x238a00, 0x238a0c, 0x238a08, 0x238a04};
-
-static struct attn_hw_reg *rss_prty_bb_b0_regs[1] = {
- &rss_prty1_bb_b0};
-
-static struct attn_hw_reg tmld_int0_bb_b0 = {
- 0, 6, 0x4d0180, 0x4d018c, 0x4d0188, 0x4d0184};
-
-static struct attn_hw_reg *tmld_int_bb_b0_regs[1] = {
- &tmld_int0_bb_b0};
-
-static struct attn_hw_reg tmld_prty1_bb_b0 = {
- 0, 8, 0x4d0200, 0x4d020c, 0x4d0208, 0x4d0204};
-
-static struct attn_hw_reg *tmld_prty_bb_b0_regs[1] = {
- &tmld_prty1_bb_b0};
-
-static struct attn_hw_reg muld_int0_bb_b0 = {
- 0, 6, 0x4e0180, 0x4e018c, 0x4e0188, 0x4e0184};
-
-static struct attn_hw_reg *muld_int_bb_b0_regs[1] = {
- &muld_int0_bb_b0};
-
-static struct attn_hw_reg muld_prty1_bb_b0 = {
- 0, 10, 0x4e0200, 0x4e020c, 0x4e0208, 0x4e0204};
-
-static struct attn_hw_reg *muld_prty_bb_b0_regs[1] = {
- &muld_prty1_bb_b0};
-
-static struct attn_hw_reg yuld_int0_bb_b0 = {
- 0, 6, 0x4c8180, 0x4c818c, 0x4c8188, 0x4c8184};
-
-static struct attn_hw_reg *yuld_int_bb_b0_regs[1] = {
- &yuld_int0_bb_b0};
-
-static struct attn_hw_reg yuld_prty1_bb_b0 = {
- 0, 6, 0x4c8200, 0x4c820c, 0x4c8208, 0x4c8204};
-
-static struct attn_hw_reg *yuld_prty_bb_b0_regs[1] = {
- &yuld_prty1_bb_b0};
-
-static struct attn_hw_reg xyld_int0_bb_b0 = {
- 0, 6, 0x4c0180, 0x4c018c, 0x4c0188, 0x4c0184};
-
-static struct attn_hw_reg *xyld_int_bb_b0_regs[1] = {
- &xyld_int0_bb_b0};
-
-static struct attn_hw_reg xyld_prty1_bb_b0 = {
- 0, 9, 0x4c0200, 0x4c020c, 0x4c0208, 0x4c0204};
-
-static struct attn_hw_reg *xyld_prty_bb_b0_regs[1] = {
- &xyld_prty1_bb_b0};
-
-static struct attn_hw_reg prm_int0_bb_b0 = {
- 0, 11, 0x230040, 0x23004c, 0x230048, 0x230044};
-
-static struct attn_hw_reg *prm_int_bb_b0_regs[1] = {
- &prm_int0_bb_b0};
-
-static struct attn_hw_reg prm_prty0_bb_b0 = {
- 0, 1, 0x230050, 0x23005c, 0x230058, 0x230054};
-
-static struct attn_hw_reg prm_prty1_bb_b0 = {
- 1, 24, 0x230200, 0x23020c, 0x230208, 0x230204};
-
-static struct attn_hw_reg *prm_prty_bb_b0_regs[2] = {
- &prm_prty0_bb_b0, &prm_prty1_bb_b0};
-
-static struct attn_hw_reg pbf_pb1_int0_bb_b0 = {
- 0, 9, 0xda0040, 0xda004c, 0xda0048, 0xda0044};
-
-static struct attn_hw_reg *pbf_pb1_int_bb_b0_regs[1] = {
- &pbf_pb1_int0_bb_b0};
-
-static struct attn_hw_reg pbf_pb1_prty0_bb_b0 = {
- 0, 1, 0xda0050, 0xda005c, 0xda0058, 0xda0054};
-
-static struct attn_hw_reg *pbf_pb1_prty_bb_b0_regs[1] = {
- &pbf_pb1_prty0_bb_b0};
-
-static struct attn_hw_reg pbf_pb2_int0_bb_b0 = {
- 0, 9, 0xda4040, 0xda404c, 0xda4048, 0xda4044};
-
-static struct attn_hw_reg *pbf_pb2_int_bb_b0_regs[1] = {
- &pbf_pb2_int0_bb_b0};
-
-static struct attn_hw_reg pbf_pb2_prty0_bb_b0 = {
- 0, 1, 0xda4050, 0xda405c, 0xda4058, 0xda4054};
-
-static struct attn_hw_reg *pbf_pb2_prty_bb_b0_regs[1] = {
- &pbf_pb2_prty0_bb_b0};
-
-static struct attn_hw_reg rpb_int0_bb_b0 = {
- 0, 9, 0x23c040, 0x23c04c, 0x23c048, 0x23c044};
-
-static struct attn_hw_reg *rpb_int_bb_b0_regs[1] = {
- &rpb_int0_bb_b0};
-
-static struct attn_hw_reg rpb_prty0_bb_b0 = {
- 0, 1, 0x23c050, 0x23c05c, 0x23c058, 0x23c054};
-
-static struct attn_hw_reg *rpb_prty_bb_b0_regs[1] = {
- &rpb_prty0_bb_b0};
-
-static struct attn_hw_reg btb_int0_bb_b0 = {
- 0, 16, 0xdb00c0, 0xdb00cc, 0xdb00c8, 0xdb00c4};
-
-static struct attn_hw_reg btb_int1_bb_b0 = {
- 1, 16, 0xdb00d8, 0xdb00e4, 0xdb00e0, 0xdb00dc};
-
-static struct attn_hw_reg btb_int2_bb_b0 = {
- 2, 4, 0xdb00f0, 0xdb00fc, 0xdb00f8, 0xdb00f4};
-
-static struct attn_hw_reg btb_int3_bb_b0 = {
- 3, 32, 0xdb0108, 0xdb0114, 0xdb0110, 0xdb010c};
-
-static struct attn_hw_reg btb_int4_bb_b0 = {
- 4, 23, 0xdb0120, 0xdb012c, 0xdb0128, 0xdb0124};
-
-static struct attn_hw_reg btb_int5_bb_b0 = {
- 5, 32, 0xdb0138, 0xdb0144, 0xdb0140, 0xdb013c};
-
-static struct attn_hw_reg btb_int6_bb_b0 = {
- 6, 1, 0xdb0150, 0xdb015c, 0xdb0158, 0xdb0154};
-
-static struct attn_hw_reg btb_int8_bb_b0 = {
- 7, 1, 0xdb0184, 0xdb0190, 0xdb018c, 0xdb0188};
-
-static struct attn_hw_reg btb_int9_bb_b0 = {
- 8, 1, 0xdb019c, 0xdb01a8, 0xdb01a4, 0xdb01a0};
-
-static struct attn_hw_reg btb_int10_bb_b0 = {
- 9, 1, 0xdb01b4, 0xdb01c0, 0xdb01bc, 0xdb01b8};
-
-static struct attn_hw_reg btb_int11_bb_b0 = {
- 10, 2, 0xdb01cc, 0xdb01d8, 0xdb01d4, 0xdb01d0};
-
-static struct attn_hw_reg *btb_int_bb_b0_regs[11] = {
- &btb_int0_bb_b0, &btb_int1_bb_b0, &btb_int2_bb_b0, &btb_int3_bb_b0,
- &btb_int4_bb_b0, &btb_int5_bb_b0, &btb_int6_bb_b0, &btb_int8_bb_b0,
- &btb_int9_bb_b0, &btb_int10_bb_b0, &btb_int11_bb_b0};
-
-static struct attn_hw_reg btb_prty0_bb_b0 = {
- 0, 5, 0xdb01dc, 0xdb01e8, 0xdb01e4, 0xdb01e0};
-
-static struct attn_hw_reg btb_prty1_bb_b0 = {
- 1, 23, 0xdb0400, 0xdb040c, 0xdb0408, 0xdb0404};
-
-static struct attn_hw_reg *btb_prty_bb_b0_regs[2] = {
- &btb_prty0_bb_b0, &btb_prty1_bb_b0};
-
-static struct attn_hw_reg pbf_int0_bb_b0 = {
- 0, 1, 0xd80180, 0xd8018c, 0xd80188, 0xd80184};
-
-static struct attn_hw_reg *pbf_int_bb_b0_regs[1] = {
- &pbf_int0_bb_b0};
-
-static struct attn_hw_reg pbf_prty0_bb_b0 = {
- 0, 1, 0xd80190, 0xd8019c, 0xd80198, 0xd80194};
-
-static struct attn_hw_reg pbf_prty1_bb_b0 = {
- 1, 31, 0xd80200, 0xd8020c, 0xd80208, 0xd80204};
-
-static struct attn_hw_reg pbf_prty2_bb_b0 = {
- 2, 27, 0xd80210, 0xd8021c, 0xd80218, 0xd80214};
-
-static struct attn_hw_reg *pbf_prty_bb_b0_regs[3] = {
- &pbf_prty0_bb_b0, &pbf_prty1_bb_b0, &pbf_prty2_bb_b0};
-
-static struct attn_hw_reg rdif_int0_bb_b0 = {
- 0, 8, 0x300180, 0x30018c, 0x300188, 0x300184};
-
-static struct attn_hw_reg *rdif_int_bb_b0_regs[1] = {
- &rdif_int0_bb_b0};
-
-static struct attn_hw_reg rdif_prty0_bb_b0 = {
- 0, 1, 0x300190, 0x30019c, 0x300198, 0x300194};
-
-static struct attn_hw_reg *rdif_prty_bb_b0_regs[1] = {
- &rdif_prty0_bb_b0};
-
-static struct attn_hw_reg tdif_int0_bb_b0 = {
- 0, 8, 0x310180, 0x31018c, 0x310188, 0x310184};
-
-static struct attn_hw_reg *tdif_int_bb_b0_regs[1] = {
- &tdif_int0_bb_b0};
-
-static struct attn_hw_reg tdif_prty0_bb_b0 = {
- 0, 1, 0x310190, 0x31019c, 0x310198, 0x310194};
-
-static struct attn_hw_reg tdif_prty1_bb_b0 = {
- 1, 11, 0x310200, 0x31020c, 0x310208, 0x310204};
-
-static struct attn_hw_reg *tdif_prty_bb_b0_regs[2] = {
- &tdif_prty0_bb_b0, &tdif_prty1_bb_b0};
-
-static struct attn_hw_reg cdu_int0_bb_b0 = {
- 0, 8, 0x5801c0, 0x5801c4, 0x5801c8, 0x5801cc};
-
-static struct attn_hw_reg *cdu_int_bb_b0_regs[1] = {
- &cdu_int0_bb_b0};
-
-static struct attn_hw_reg cdu_prty1_bb_b0 = {
- 0, 5, 0x580200, 0x58020c, 0x580208, 0x580204};
-
-static struct attn_hw_reg *cdu_prty_bb_b0_regs[1] = {
- &cdu_prty1_bb_b0};
-
-static struct attn_hw_reg ccfc_int0_bb_b0 = {
- 0, 2, 0x2e0180, 0x2e018c, 0x2e0188, 0x2e0184};
-
-static struct attn_hw_reg *ccfc_int_bb_b0_regs[1] = {
- &ccfc_int0_bb_b0};
-
-static struct attn_hw_reg ccfc_prty1_bb_b0 = {
- 0, 2, 0x2e0200, 0x2e020c, 0x2e0208, 0x2e0204};
-
-static struct attn_hw_reg ccfc_prty0_bb_b0 = {
- 1, 6, 0x2e05e4, 0x2e05f0, 0x2e05ec, 0x2e05e8};
-
-static struct attn_hw_reg *ccfc_prty_bb_b0_regs[2] = {
- &ccfc_prty1_bb_b0, &ccfc_prty0_bb_b0};
-
-static struct attn_hw_reg tcfc_int0_bb_b0 = {
- 0, 2, 0x2d0180, 0x2d018c, 0x2d0188, 0x2d0184};
-
-static struct attn_hw_reg *tcfc_int_bb_b0_regs[1] = {
- &tcfc_int0_bb_b0};
-
-static struct attn_hw_reg tcfc_prty1_bb_b0 = {
- 0, 2, 0x2d0200, 0x2d020c, 0x2d0208, 0x2d0204};
-
-static struct attn_hw_reg tcfc_prty0_bb_b0 = {
- 1, 6, 0x2d05e4, 0x2d05f0, 0x2d05ec, 0x2d05e8};
-
-static struct attn_hw_reg *tcfc_prty_bb_b0_regs[2] = {
- &tcfc_prty1_bb_b0, &tcfc_prty0_bb_b0};
-
-static struct attn_hw_reg igu_int0_bb_b0 = {
- 0, 11, 0x180180, 0x18018c, 0x180188, 0x180184};
-
-static struct attn_hw_reg *igu_int_bb_b0_regs[1] = {
- &igu_int0_bb_b0};
-
-static struct attn_hw_reg igu_prty0_bb_b0 = {
- 0, 1, 0x180190, 0x18019c, 0x180198, 0x180194};
-
-static struct attn_hw_reg igu_prty1_bb_b0 = {
- 1, 31, 0x180200, 0x18020c, 0x180208, 0x180204};
-
-static struct attn_hw_reg igu_prty2_bb_b0 = {
- 2, 1, 0x180210, 0x18021c, 0x180218, 0x180214};
-
-static struct attn_hw_reg *igu_prty_bb_b0_regs[3] = {
- &igu_prty0_bb_b0, &igu_prty1_bb_b0, &igu_prty2_bb_b0};
-
-static struct attn_hw_reg cau_int0_bb_b0 = {
- 0, 11, 0x1c00d4, 0x1c00d8, 0x1c00dc, 0x1c00e0};
-
-static struct attn_hw_reg *cau_int_bb_b0_regs[1] = {
- &cau_int0_bb_b0};
-
-static struct attn_hw_reg cau_prty1_bb_b0 = {
- 0, 13, 0x1c0200, 0x1c020c, 0x1c0208, 0x1c0204};
-
-static struct attn_hw_reg *cau_prty_bb_b0_regs[1] = {
- &cau_prty1_bb_b0};
-
-static struct attn_hw_reg dbg_int0_bb_b0 = {
- 0, 1, 0x10180, 0x1018c, 0x10188, 0x10184};
-
-static struct attn_hw_reg *dbg_int_bb_b0_regs[1] = {
- &dbg_int0_bb_b0};
-
-static struct attn_hw_reg dbg_prty1_bb_b0 = {
- 0, 1, 0x10200, 0x1020c, 0x10208, 0x10204};
-
-static struct attn_hw_reg *dbg_prty_bb_b0_regs[1] = {
- &dbg_prty1_bb_b0};
-
-static struct attn_hw_reg nig_int0_bb_b0 = {
- 0, 12, 0x500040, 0x50004c, 0x500048, 0x500044};
-
-static struct attn_hw_reg nig_int1_bb_b0 = {
- 1, 32, 0x500050, 0x50005c, 0x500058, 0x500054};
-
-static struct attn_hw_reg nig_int2_bb_b0 = {
- 2, 20, 0x500060, 0x50006c, 0x500068, 0x500064};
-
-static struct attn_hw_reg nig_int3_bb_b0 = {
- 3, 18, 0x500070, 0x50007c, 0x500078, 0x500074};
-
-static struct attn_hw_reg nig_int4_bb_b0 = {
- 4, 20, 0x500080, 0x50008c, 0x500088, 0x500084};
-
-static struct attn_hw_reg nig_int5_bb_b0 = {
- 5, 18, 0x500090, 0x50009c, 0x500098, 0x500094};
-
-static struct attn_hw_reg *nig_int_bb_b0_regs[6] = {
- &nig_int0_bb_b0, &nig_int1_bb_b0, &nig_int2_bb_b0, &nig_int3_bb_b0,
- &nig_int4_bb_b0, &nig_int5_bb_b0};
-
-static struct attn_hw_reg nig_prty0_bb_b0 = {
- 0, 1, 0x5000a0, 0x5000ac, 0x5000a8, 0x5000a4};
-
-static struct attn_hw_reg nig_prty1_bb_b0 = {
- 1, 31, 0x500200, 0x50020c, 0x500208, 0x500204};
-
-static struct attn_hw_reg nig_prty2_bb_b0 = {
- 2, 31, 0x500210, 0x50021c, 0x500218, 0x500214};
-
-static struct attn_hw_reg nig_prty3_bb_b0 = {
- 3, 31, 0x500220, 0x50022c, 0x500228, 0x500224};
-
-static struct attn_hw_reg nig_prty4_bb_b0 = {
- 4, 17, 0x500230, 0x50023c, 0x500238, 0x500234};
-
-static struct attn_hw_reg *nig_prty_bb_b0_regs[5] = {
- &nig_prty0_bb_b0, &nig_prty1_bb_b0, &nig_prty2_bb_b0,
- &nig_prty3_bb_b0, &nig_prty4_bb_b0};
-
-static struct attn_hw_reg ipc_int0_bb_b0 = {
- 0, 13, 0x2050c, 0x20518, 0x20514, 0x20510};
-
-static struct attn_hw_reg *ipc_int_bb_b0_regs[1] = {
- &ipc_int0_bb_b0};
-
-static struct attn_hw_reg ipc_prty0_bb_b0 = {
- 0, 1, 0x2051c, 0x20528, 0x20524, 0x20520};
-
-static struct attn_hw_reg *ipc_prty_bb_b0_regs[1] = {
- &ipc_prty0_bb_b0};
-
-static struct attn_hw_block attn_blocks[] = {
- {"grc", {{1, 1, grc_int_bb_b0_regs, grc_prty_bb_b0_regs} } },
- {"miscs", {{2, 1, miscs_int_bb_b0_regs, miscs_prty_bb_b0_regs} } },
- {"misc", {{1, 0, misc_int_bb_b0_regs, NULL} } },
- {"dbu", {{0, 0, NULL, NULL} } },
- {"pglue_b", {{1, 2, pglue_b_int_bb_b0_regs,
- pglue_b_prty_bb_b0_regs} } },
- {"cnig", {{1, 1, cnig_int_bb_b0_regs, cnig_prty_bb_b0_regs} } },
- {"cpmu", {{1, 0, cpmu_int_bb_b0_regs, NULL} } },
- {"ncsi", {{1, 1, ncsi_int_bb_b0_regs, ncsi_prty_bb_b0_regs} } },
- {"opte", {{0, 2, NULL, opte_prty_bb_b0_regs} } },
- {"bmb", {{12, 3, bmb_int_bb_b0_regs, bmb_prty_bb_b0_regs} } },
- {"pcie", {{0, 1, NULL, pcie_prty_bb_b0_regs} } },
- {"mcp", {{0, 0, NULL, NULL} } },
- {"mcp2", {{0, 2, NULL, mcp2_prty_bb_b0_regs} } },
- {"pswhst", {{1, 2, pswhst_int_bb_b0_regs, pswhst_prty_bb_b0_regs} } },
- {"pswhst2", {{1, 1, pswhst2_int_bb_b0_regs,
- pswhst2_prty_bb_b0_regs} } },
- {"pswrd", {{1, 1, pswrd_int_bb_b0_regs, pswrd_prty_bb_b0_regs} } },
- {"pswrd2", {{1, 3, pswrd2_int_bb_b0_regs, pswrd2_prty_bb_b0_regs} } },
- {"pswwr", {{1, 1, pswwr_int_bb_b0_regs, pswwr_prty_bb_b0_regs} } },
- {"pswwr2", {{1, 5, pswwr2_int_bb_b0_regs, pswwr2_prty_bb_b0_regs} } },
- {"pswrq", {{1, 1, pswrq_int_bb_b0_regs, pswrq_prty_bb_b0_regs} } },
- {"pswrq2", {{1, 1, pswrq2_int_bb_b0_regs, pswrq2_prty_bb_b0_regs} } },
- {"pglcs", {{1, 0, pglcs_int_bb_b0_regs, NULL} } },
- {"dmae", {{1, 1, dmae_int_bb_b0_regs, dmae_prty_bb_b0_regs} } },
- {"ptu", {{1, 1, ptu_int_bb_b0_regs, ptu_prty_bb_b0_regs} } },
- {"tcm", {{3, 2, tcm_int_bb_b0_regs, tcm_prty_bb_b0_regs} } },
- {"mcm", {{3, 2, mcm_int_bb_b0_regs, mcm_prty_bb_b0_regs} } },
- {"ucm", {{3, 2, ucm_int_bb_b0_regs, ucm_prty_bb_b0_regs} } },
- {"xcm", {{3, 2, xcm_int_bb_b0_regs, xcm_prty_bb_b0_regs} } },
- {"ycm", {{3, 2, ycm_int_bb_b0_regs, ycm_prty_bb_b0_regs} } },
- {"pcm", {{3, 1, pcm_int_bb_b0_regs, pcm_prty_bb_b0_regs} } },
- {"qm", {{1, 4, qm_int_bb_b0_regs, qm_prty_bb_b0_regs} } },
- {"tm", {{2, 1, tm_int_bb_b0_regs, tm_prty_bb_b0_regs} } },
- {"dorq", {{1, 2, dorq_int_bb_b0_regs, dorq_prty_bb_b0_regs} } },
- {"brb", {{12, 3, brb_int_bb_b0_regs, brb_prty_bb_b0_regs} } },
- {"src", {{1, 0, src_int_bb_b0_regs, NULL} } },
- {"prs", {{1, 3, prs_int_bb_b0_regs, prs_prty_bb_b0_regs} } },
- {"tsdm", {{1, 1, tsdm_int_bb_b0_regs, tsdm_prty_bb_b0_regs} } },
- {"msdm", {{1, 1, msdm_int_bb_b0_regs, msdm_prty_bb_b0_regs} } },
- {"usdm", {{1, 1, usdm_int_bb_b0_regs, usdm_prty_bb_b0_regs} } },
- {"xsdm", {{1, 1, xsdm_int_bb_b0_regs, xsdm_prty_bb_b0_regs} } },
- {"ysdm", {{1, 1, ysdm_int_bb_b0_regs, ysdm_prty_bb_b0_regs} } },
- {"psdm", {{1, 1, psdm_int_bb_b0_regs, psdm_prty_bb_b0_regs} } },
- {"tsem", {{3, 3, tsem_int_bb_b0_regs, tsem_prty_bb_b0_regs} } },
- {"msem", {{3, 2, msem_int_bb_b0_regs, msem_prty_bb_b0_regs} } },
- {"usem", {{3, 2, usem_int_bb_b0_regs, usem_prty_bb_b0_regs} } },
- {"xsem", {{3, 2, xsem_int_bb_b0_regs, xsem_prty_bb_b0_regs} } },
- {"ysem", {{3, 2, ysem_int_bb_b0_regs, ysem_prty_bb_b0_regs} } },
- {"psem", {{3, 3, psem_int_bb_b0_regs, psem_prty_bb_b0_regs} } },
- {"rss", {{1, 1, rss_int_bb_b0_regs, rss_prty_bb_b0_regs} } },
- {"tmld", {{1, 1, tmld_int_bb_b0_regs, tmld_prty_bb_b0_regs} } },
- {"muld", {{1, 1, muld_int_bb_b0_regs, muld_prty_bb_b0_regs} } },
- {"yuld", {{1, 1, yuld_int_bb_b0_regs, yuld_prty_bb_b0_regs} } },
- {"xyld", {{1, 1, xyld_int_bb_b0_regs, xyld_prty_bb_b0_regs} } },
- {"prm", {{1, 2, prm_int_bb_b0_regs, prm_prty_bb_b0_regs} } },
- {"pbf_pb1", {{1, 1, pbf_pb1_int_bb_b0_regs,
- pbf_pb1_prty_bb_b0_regs} } },
- {"pbf_pb2", {{1, 1, pbf_pb2_int_bb_b0_regs,
- pbf_pb2_prty_bb_b0_regs} } },
- {"rpb", { {1, 1, rpb_int_bb_b0_regs, rpb_prty_bb_b0_regs} } },
- {"btb", { {11, 2, btb_int_bb_b0_regs, btb_prty_bb_b0_regs} } },
- {"pbf", { {1, 3, pbf_int_bb_b0_regs, pbf_prty_bb_b0_regs} } },
- {"rdif", { {1, 1, rdif_int_bb_b0_regs, rdif_prty_bb_b0_regs} } },
- {"tdif", { {1, 2, tdif_int_bb_b0_regs, tdif_prty_bb_b0_regs} } },
- {"cdu", { {1, 1, cdu_int_bb_b0_regs, cdu_prty_bb_b0_regs} } },
- {"ccfc", { {1, 2, ccfc_int_bb_b0_regs, ccfc_prty_bb_b0_regs} } },
- {"tcfc", { {1, 2, tcfc_int_bb_b0_regs, tcfc_prty_bb_b0_regs} } },
- {"igu", { {1, 3, igu_int_bb_b0_regs, igu_prty_bb_b0_regs} } },
- {"cau", { {1, 1, cau_int_bb_b0_regs, cau_prty_bb_b0_regs} } },
- {"umac", { {0, 0, NULL, NULL} } },
- {"xmac", { {0, 0, NULL, NULL} } },
- {"dbg", { {1, 1, dbg_int_bb_b0_regs, dbg_prty_bb_b0_regs} } },
- {"nig", { {6, 5, nig_int_bb_b0_regs, nig_prty_bb_b0_regs} } },
- {"wol", { {0, 0, NULL, NULL} } },
- {"bmbn", { {0, 0, NULL, NULL} } },
- {"ipc", { {1, 1, ipc_int_bb_b0_regs, ipc_prty_bb_b0_regs} } },
- {"nwm", { {0, 0, NULL, NULL} } },
- {"nws", { {0, 0, NULL, NULL} } },
- {"ms", { {0, 0, NULL, NULL} } },
- {"phy_pcie", { {0, 0, NULL, NULL} } },
- {"misc_aeu", { {0, 0, NULL, NULL} } },
- {"bar0_map", { {0, 0, NULL, NULL} } },};
-
/* Specific HW attention callbacks */
static int qed_mcp_attn_cb(struct qed_hwfn *p_hwfn)
{
@@ -1590,6 +387,25 @@ static int qed_dorq_attn_cb(struct qed_hwfn *p_hwfn)
return -EINVAL;
}
+/* Instead of major changes to the data-structure, we have some 'special'
+ * identifiers for sources that changed meaning between adapters.
+ */
+enum aeu_invert_reg_special_type {
+ AEU_INVERT_REG_SPECIAL_CNIG_0,
+ AEU_INVERT_REG_SPECIAL_CNIG_1,
+ AEU_INVERT_REG_SPECIAL_CNIG_2,
+ AEU_INVERT_REG_SPECIAL_CNIG_3,
+ AEU_INVERT_REG_SPECIAL_MAX,
+};
+
+static struct aeu_invert_reg_bit
+aeu_descs_special[AEU_INVERT_REG_SPECIAL_MAX] = {
+ {"CNIG port 0", ATTENTION_SINGLE, NULL, BLOCK_CNIG},
+ {"CNIG port 1", ATTENTION_SINGLE, NULL, BLOCK_CNIG},
+ {"CNIG port 2", ATTENTION_SINGLE, NULL, BLOCK_CNIG},
+ {"CNIG port 3", ATTENTION_SINGLE, NULL, BLOCK_CNIG},
+};
+
/* Notice aeu_invert_reg must be defined in the same order of bits as HW; */
static struct aeu_invert_reg aeu_descs[NUM_ATTN_REGS] = {
{
@@ -1636,8 +452,22 @@ static struct aeu_invert_reg aeu_descs[NUM_ATTN_REGS] = {
(33 << ATTENTION_OFFSET_SHIFT), NULL, MAX_BLOCK_ID},
{"General Attention 35", ATTENTION_SINGLE,
NULL, MAX_BLOCK_ID},
- {"CNIG port %d", (4 << ATTENTION_LENGTH_SHIFT),
- NULL, BLOCK_CNIG},
+ {"NWS Parity",
+ ATTENTION_PAR | ATTENTION_BB_DIFFERENT |
+ ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_0),
+ NULL, BLOCK_NWS},
+ {"NWS Interrupt",
+ ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT |
+ ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_1),
+ NULL, BLOCK_NWS},
+ {"NWM Parity",
+ ATTENTION_PAR | ATTENTION_BB_DIFFERENT |
+ ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_2),
+ NULL, BLOCK_NWM},
+ {"NWM Interrupt",
+ ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT |
+ ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_3),
+ NULL, BLOCK_NWM},
{"MCP CPU", ATTENTION_SINGLE,
qed_mcp_attn_cb, MAX_BLOCK_ID},
{"MCP Watchdog timer", ATTENTION_SINGLE,
@@ -1775,6 +605,27 @@ static struct aeu_invert_reg aeu_descs[NUM_ATTN_REGS] = {
},
};
+static struct aeu_invert_reg_bit *
+qed_int_aeu_translate(struct qed_hwfn *p_hwfn,
+ struct aeu_invert_reg_bit *p_bit)
+{
+ if (!QED_IS_BB(p_hwfn->cdev))
+ return p_bit;
+
+ if (!(p_bit->flags & ATTENTION_BB_DIFFERENT))
+ return p_bit;
+
+ return &aeu_descs_special[(p_bit->flags & ATTENTION_BB_MASK) >>
+ ATTENTION_BB_SHIFT];
+}
+
+static bool qed_int_is_parity_flag(struct qed_hwfn *p_hwfn,
+ struct aeu_invert_reg_bit *p_bit)
+{
+ return !!(qed_int_aeu_translate(p_hwfn, p_bit)->flags &
+ ATTENTION_PARITY);
+}
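
The translation above works by stashing an index into aeu_descs_special inside otherwise-unused bits of the descriptor's flags word: ATTENTION_BB(x) encodes the index and ATTENTION_BB_DIFFERENT marks descriptors that need the lookup. A minimal standalone sketch of that encode/decode round trip, using made-up DEMO_* shift/mask values in place of the real ATTENTION_BB_* definitions:

#include <stdio.h>

/* Stand-ins for the real ATTENTION_BB_* definitions in qed_int.c;
 * the shift/mask values here are assumptions for this demo only.
 */
#define DEMO_BB_SHIFT		8
#define DEMO_BB_MASK		(0xf << DEMO_BB_SHIFT)
#define DEMO_BB(value)		((value) << DEMO_BB_SHIFT)
#define DEMO_BB_DIFFERENT	(1 << 12)

/* Mirror of the qed_int_aeu_translate() lookup: pull the index of
 * the 'special' descriptor back out of the flags word.
 */
static unsigned int demo_translate(unsigned int flags)
{
	return (flags & DEMO_BB_MASK) >> DEMO_BB_SHIFT;
}

int main(void)
{
	unsigned int flags = DEMO_BB_DIFFERENT | DEMO_BB(2);

	if (flags & DEMO_BB_DIFFERENT)
		printf("special table index: %u\n", demo_translate(flags));
	return 0;
}
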
+
#define ATTN_STATE_BITS (0xfff)
#define ATTN_BITS_MASKABLE (0x3ff)
struct qed_sb_attn_info {
@@ -1863,26 +714,23 @@ static int qed_int_assertion(struct qed_hwfn *p_hwfn, u16 asserted_bits)
return 0;
}
-static void qed_int_deassertion_print_bit(struct qed_hwfn *p_hwfn,
- struct attn_hw_reg *p_reg_desc,
- struct attn_hw_block *p_block,
- enum qed_attention_type type,
- u32 val, u32 mask)
+static void qed_int_attn_print(struct qed_hwfn *p_hwfn,
+ enum block_id id,
+ enum dbg_attn_type type, bool b_clear)
{
- int j;
+ struct dbg_attn_block_result attn_results;
+ enum dbg_status status;
- for (j = 0; j < p_reg_desc->num_of_bits; j++) {
- if (!(val & (1 << j)))
- continue;
+ memset(&attn_results, 0, sizeof(attn_results));
+ status = qed_dbg_read_attn(p_hwfn, p_hwfn->p_dpc_ptt, id, type,
+ b_clear, &attn_results);
+ if (status != DBG_STATUS_OK)
DP_NOTICE(p_hwfn,
- "%s (%s): reg %d [0x%08x], bit %d [%s]\n",
- p_block->name,
- type == QED_ATTN_TYPE_ATTN ? "Interrupt" :
- "Parity",
- p_reg_desc->reg_idx, p_reg_desc->sts_addr,
- j, (mask & (1 << j)) ? " [MASKED]" : "");
- }
+ "Failed to parse attention information [status: %s]\n",
+ qed_dbg_get_status_str(status));
+ else
+ qed_dbg_parse_attn(p_hwfn, &attn_results);
}
/**
@@ -1901,53 +749,30 @@ static int
qed_int_deassertion_aeu_bit(struct qed_hwfn *p_hwfn,
struct aeu_invert_reg_bit *p_aeu,
u32 aeu_en_reg,
- u32 bitmask)
+ const char *p_bit_name, u32 bitmask)
{
+ bool b_fatal = false;
int rc = -EINVAL;
u32 val;
DP_INFO(p_hwfn, "Deasserted attention `%s'[%08x]\n",
- p_aeu->bit_name, bitmask);
+ p_bit_name, bitmask);
/* Call callback before clearing the interrupt status */
if (p_aeu->cb) {
DP_INFO(p_hwfn, "`%s (attention)': Calling Callback function\n",
- p_aeu->bit_name);
+ p_bit_name);
rc = p_aeu->cb(p_hwfn);
}
- /* Handle HW block interrupt registers */
- if (p_aeu->block_index != MAX_BLOCK_ID) {
- struct attn_hw_block *p_block;
- u32 mask;
- int i;
-
- p_block = &attn_blocks[p_aeu->block_index];
-
- /* Handle each interrupt register */
- for (i = 0; i < p_block->chip_regs[0].num_of_int_regs; i++) {
- struct attn_hw_reg *p_reg_desc;
- u32 sts_addr;
+ if (rc)
+ b_fatal = true;
- p_reg_desc = p_block->chip_regs[0].int_regs[i];
+ /* Print HW block interrupt registers */
+ if (p_aeu->block_index != MAX_BLOCK_ID)
+ qed_int_attn_print(p_hwfn, p_aeu->block_index,
+ ATTN_TYPE_INTERRUPT, !b_fatal);
- /* In case of fatal attention, don't clear the status
- * so it would appear in following idle check.
- */
- if (rc == 0)
- sts_addr = p_reg_desc->sts_clr_addr;
- else
- sts_addr = p_reg_desc->sts_addr;
-
- val = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, sts_addr);
- mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
- p_reg_desc->mask_addr);
- qed_int_deassertion_print_bit(p_hwfn, p_reg_desc,
- p_block,
- QED_ATTN_TYPE_ATTN,
- val, mask);
- }
- }
/* If the attention is benign, no need to prevent it */
if (!rc)
@@ -1957,66 +782,48 @@ qed_int_deassertion_aeu_bit(struct qed_hwfn *p_hwfn,
val = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg);
qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, (val & ~bitmask));
DP_INFO(p_hwfn, "`%s' - Disabled future attentions\n",
- p_aeu->bit_name);
+ p_bit_name);
out:
return rc;
}
-static void qed_int_parity_print(struct qed_hwfn *p_hwfn,
- struct aeu_invert_reg_bit *p_aeu,
- struct attn_hw_block *p_block,
- u8 bit_index)
-{
- int i;
-
- for (i = 0; i < p_block->chip_regs[0].num_of_prty_regs; i++) {
- struct attn_hw_reg *p_reg_desc;
- u32 val, mask;
-
- p_reg_desc = p_block->chip_regs[0].prty_regs[i];
-
- val = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
- p_reg_desc->sts_clr_addr);
- mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
- p_reg_desc->mask_addr);
- qed_int_deassertion_print_bit(p_hwfn, p_reg_desc,
- p_block,
- QED_ATTN_TYPE_PARITY,
- val, mask);
- }
-}
-
/**
* @brief qed_int_deassertion_parity - handle a single parity AEU source
*
* @param p_hwfn
* @param p_aeu - descriptor of an AEU bit which caused the parity
+ * @param aeu_en_reg - address of the AEU enable register
* @param bit_index
*/
static void qed_int_deassertion_parity(struct qed_hwfn *p_hwfn,
struct aeu_invert_reg_bit *p_aeu,
- u8 bit_index)
+ u32 aeu_en_reg, u8 bit_index)
{
- u32 block_id = p_aeu->block_index;
+ u32 block_id = p_aeu->block_index, mask, val;
- DP_INFO(p_hwfn->cdev, "%s[%d] parity attention is set\n",
- p_aeu->bit_name, bit_index);
+ DP_NOTICE(p_hwfn->cdev,
+ "%s parity attention is set [address 0x%08x, bit %d]\n",
+ p_aeu->bit_name, aeu_en_reg, bit_index);
if (block_id != MAX_BLOCK_ID) {
- qed_int_parity_print(p_hwfn, p_aeu, &attn_blocks[block_id],
- bit_index);
+ qed_int_attn_print(p_hwfn, block_id, ATTN_TYPE_PARITY, false);
/* In BB, there's a single parity bit for several blocks */
if (block_id == BLOCK_BTB) {
- qed_int_parity_print(p_hwfn, p_aeu,
- &attn_blocks[BLOCK_OPTE],
- bit_index);
- qed_int_parity_print(p_hwfn, p_aeu,
- &attn_blocks[BLOCK_MCP],
- bit_index);
+ qed_int_attn_print(p_hwfn, BLOCK_OPTE,
+ ATTN_TYPE_PARITY, false);
+ qed_int_attn_print(p_hwfn, BLOCK_MCP,
+ ATTN_TYPE_PARITY, false);
}
}
+
+ /* Prevent this parity error from being re-asserted */
+ mask = ~BIT(bit_index);
+ val = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg);
+ qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, val & mask);
+ DP_INFO(p_hwfn, "`%s' - Disabled future parity errors\n",
+ p_aeu->bit_name);
}
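
The new tail of qed_int_deassertion_parity() keeps a parity source from storming by clearing its bit in the AEU enable register, a plain read-modify-write. A standalone sketch of the masking, with qed_rd()/qed_wr() replaced by a fake register variable since the demo has no hardware behind it:

#include <stdio.h>
#include <stdint.h>

#define BIT(n) (1U << (n))

/* Fake register standing in for the AEU enable register; the
 * initial contents are illustrative only.
 */
static uint32_t aeu_enable_reg = 0xffffffff;

static void demo_mask_parity(unsigned int bit_index)
{
	uint32_t val = aeu_enable_reg;		/* qed_rd() */

	aeu_enable_reg = val & ~BIT(bit_index);	/* qed_wr() */
	printf("enable: %08x -> %08x (bit %u masked)\n",
	       val, aeu_enable_reg, bit_index);
}

int main(void)
{
	demo_mask_parity(5);	/* enable: ffffffff -> ffffffdf */
	return 0;
}
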
/**
@@ -2031,7 +838,7 @@ static int qed_int_deassertion(struct qed_hwfn *p_hwfn,
u16 deasserted_bits)
{
struct qed_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn;
- u32 aeu_inv_arr[NUM_ATTN_REGS], aeu_mask;
+ u32 aeu_inv_arr[NUM_ATTN_REGS], aeu_mask, aeu_en, en;
u8 i, j, k, bit_idx;
int rc = 0;
@@ -2048,11 +855,11 @@ static int qed_int_deassertion(struct qed_hwfn *p_hwfn,
/* Find parity attentions first */
for (i = 0; i < NUM_ATTN_REGS; i++) {
struct aeu_invert_reg *p_aeu = &sb_attn_sw->p_aeu_desc[i];
- u32 en = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
- MISC_REG_AEU_ENABLE1_IGU_OUT_0 +
- i * sizeof(u32));
u32 parities;
+ aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 + i * sizeof(u32);
+ en = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en);
+
/* Skip register in which no parity bit is currently set */
parities = sb_attn_sw->parity_mask[i] & aeu_inv_arr[i] & en;
if (!parities)
@@ -2061,10 +868,10 @@ static int qed_int_deassertion(struct qed_hwfn *p_hwfn,
for (j = 0, bit_idx = 0; bit_idx < 32; j++) {
struct aeu_invert_reg_bit *p_bit = &p_aeu->bits[j];
- if ((p_bit->flags & ATTENTION_PARITY) &&
+ if (qed_int_is_parity_flag(p_hwfn, p_bit) &&
!!(parities & BIT(bit_idx)))
qed_int_deassertion_parity(p_hwfn, p_bit,
- bit_idx);
+ aeu_en, bit_idx);
bit_idx += ATTENTION_LENGTH(p_bit->flags);
}
@@ -2079,10 +886,11 @@ static int qed_int_deassertion(struct qed_hwfn *p_hwfn,
continue;
for (i = 0; i < NUM_ATTN_REGS; i++) {
- u32 aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 +
- i * sizeof(u32) +
- k * sizeof(u32) * NUM_ATTN_REGS;
- u32 en, bits;
+ u32 bits;
+
+ aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 +
+ i * sizeof(u32) +
+ k * sizeof(u32) * NUM_ATTN_REGS;
en = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en);
bits = aeu_inv_arr[i] & en;
@@ -2096,29 +904,54 @@ static int qed_int_deassertion(struct qed_hwfn *p_hwfn,
* previous assertion.
*/
for (j = 0, bit_idx = 0; bit_idx < 32; j++) {
+ unsigned long bitmask;
u8 bit, bit_len;
- u32 bitmask;
p_aeu = &sb_attn_sw->p_aeu_desc[i].bits[j];
-
- /* No need to handle parity-only bits */
- if (p_aeu->flags == ATTENTION_PAR)
- continue;
+ p_aeu = qed_int_aeu_translate(p_hwfn, p_aeu);
bit = bit_idx;
bit_len = ATTENTION_LENGTH(p_aeu->flags);
- if (p_aeu->flags & ATTENTION_PAR_INT) {
+ if (qed_int_is_parity_flag(p_hwfn, p_aeu)) {
/* Skip Parity */
bit++;
bit_len--;
}
bitmask = bits & (((1 << bit_len) - 1) << bit);
+ bitmask >>= bit;
+
if (bitmask) {
+ u32 flags = p_aeu->flags;
+ char bit_name[30];
+ u8 num;
+
+ num = (u8)find_first_bit(&bitmask,
+ bit_len);
+
+ /* Some bits represent more than
+ * a single interrupt. Correctly print
+ * their name.
+ */
+ if (ATTENTION_LENGTH(flags) > 2 ||
+ ((flags & ATTENTION_PAR_INT) &&
+ ATTENTION_LENGTH(flags) > 1))
+ snprintf(bit_name, 30,
+ p_aeu->bit_name, num);
+ else
+ strncpy(bit_name,
+ p_aeu->bit_name, 30);
+
+ /* We now need to pass the bitmask in its
+ * correct position.
+ */
+ bitmask <<= bit;
+
/* Handle source of the attention */
qed_int_deassertion_aeu_bit(p_hwfn,
p_aeu,
aeu_en,
+ bit_name,
bitmask);
}
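
The bit_name handling above deals with AEU entries that span several consecutive bits: the field is isolated with a shifted mask, shifted down so find_first_bit() yields a small index, the printf-style template name is formatted with that index, and the mask is shifted back to its original position before being passed on. A standalone sketch, with find_first_bit() approximated by the GCC/Clang ctz builtin and an illustrative field layout:

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *name_fmt = "DEMO port %d";	/* printf-style AEU name */
	unsigned long bits = 0x00000600;	/* deasserted bits in reg */
	unsigned int bit = 9, bit_len = 4;	/* field occupies bits 9..12 */
	unsigned long bitmask;
	char bit_name[30];

	bitmask = bits & (((1UL << bit_len) - 1) << bit);
	bitmask >>= bit;			/* field now right-aligned */

	if (bitmask) {
		/* index of first set bit, like find_first_bit() */
		unsigned int num = __builtin_ctzl(bitmask);

		snprintf(bit_name, sizeof(bit_name), name_fmt, num);
		bitmask <<= bit;		/* restore original position */
		printf("%s [mask %08lx]\n", bit_name, bitmask);
	}
	return 0;
}
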
@@ -2328,6 +1161,7 @@ static void qed_int_sb_attn_free(struct qed_hwfn *p_hwfn)
SB_ATTN_ALIGNED_SIZE(p_hwfn),
p_sb->sb_attn, p_sb->sb_phys);
kfree(p_sb);
+ p_hwfn->p_sb_attn = NULL;
}
static void qed_int_sb_attn_setup(struct qed_hwfn *p_hwfn,
@@ -2365,12 +1199,13 @@ static void qed_int_sb_attn_init(struct qed_hwfn *p_hwfn,
for (i = 0; i < NUM_ATTN_REGS; i++) {
/* j is array index, k is bit index */
for (j = 0, k = 0; k < 32; j++) {
- unsigned int flags = aeu_descs[i].bits[j].flags;
+ struct aeu_invert_reg_bit *p_aeu;
- if (flags & ATTENTION_PARITY)
+ p_aeu = &aeu_descs[i].bits[j];
+ if (qed_int_is_parity_flag(p_hwfn, p_aeu))
sb_info->parity_mask[i] |= 1 << k;
- k += ATTENTION_LENGTH(flags);
+ k += ATTENTION_LENGTH(p_aeu->flags);
}
DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
"Attn Mask [Reg %d]: 0x%08x\n",
@@ -2465,6 +1300,40 @@ void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn,
SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE1, cau_state);
}
+static void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u16 igu_sb_id,
+ u32 pi_index,
+ enum qed_coalescing_fsm coalescing_fsm,
+ u8 timeset)
+{
+ struct cau_pi_entry pi_entry;
+ u32 sb_offset, pi_offset;
+
+ if (IS_VF(p_hwfn->cdev))
+ return;
+
+ sb_offset = igu_sb_id * PIS_PER_SB;
+ memset(&pi_entry, 0, sizeof(struct cau_pi_entry));
+
+ SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_PI_TIMESET, timeset);
+ if (coalescing_fsm == QED_COAL_RX_STATE_MACHINE)
+ SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 0);
+ else
+ SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 1);
+
+ pi_offset = sb_offset + pi_index;
+ if (p_hwfn->hw_init_done) {
+ qed_wr(p_hwfn, p_ptt,
+ CAU_REG_PI_MEMORY + pi_offset * sizeof(u32),
+ *((u32 *)&(pi_entry)));
+ } else {
+ STORE_RT_REG(p_hwfn,
+ CAU_REG_PI_MEMORY_RT_OFFSET + pi_offset,
+ *((u32 *)&(pi_entry)));
+ }
+}
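
qed_int_cau_conf_pi() addresses CAU_REG_PI_MEMORY as a flat array of dwords, PIS_PER_SB entries per status block, so the target entry is simply igu_sb_id * PIS_PER_SB + pi_index. A sketch of that arithmetic, assuming PIS_PER_SB is 12 (consistent with the 12-iteration PI clear loop later in this file) and an illustrative base address:

#include <stdio.h>
#include <stdint.h>

#define DEMO_PIS_PER_SB		12	/* assumed; matches the 12-entry
					 * clear loop in this file */
#define DEMO_CAU_PI_MEMORY	0x1c0000 /* illustrative base address */

/* Byte address of one PI entry, as computed in qed_int_cau_conf_pi() */
static uint32_t demo_pi_addr(uint16_t igu_sb_id, uint32_t pi_index)
{
	uint32_t sb_offset = igu_sb_id * DEMO_PIS_PER_SB;
	uint32_t pi_offset = sb_offset + pi_index;

	return DEMO_CAU_PI_MEMORY + pi_offset * sizeof(uint32_t);
}

int main(void)
{
	/* SB 5, PI 2 -> entry (5 * 12 + 2) = 62 -> base + 62 * 4 */
	printf("addr = 0x%x\n", demo_pi_addr(5, 2));
	return 0;
}
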
+
void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
dma_addr_t sb_phys,
@@ -2531,40 +1400,6 @@ void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn,
}
}
-void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u16 igu_sb_id,
- u32 pi_index,
- enum qed_coalescing_fsm coalescing_fsm,
- u8 timeset)
-{
- struct cau_pi_entry pi_entry;
- u32 sb_offset, pi_offset;
-
- if (IS_VF(p_hwfn->cdev))
- return;
-
- sb_offset = igu_sb_id * PIS_PER_SB;
- memset(&pi_entry, 0, sizeof(struct cau_pi_entry));
-
- SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_PI_TIMESET, timeset);
- if (coalescing_fsm == QED_COAL_RX_STATE_MACHINE)
- SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 0);
- else
- SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 1);
-
- pi_offset = sb_offset + pi_index;
- if (p_hwfn->hw_init_done) {
- qed_wr(p_hwfn, p_ptt,
- CAU_REG_PI_MEMORY + pi_offset * sizeof(u32),
- *((u32 *)&(pi_entry)));
- } else {
- STORE_RT_REG(p_hwfn,
- CAU_REG_PI_MEMORY_RT_OFFSET + pi_offset,
- *((u32 *)&(pi_entry)));
- }
-}
-
void qed_int_sb_setup(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, struct qed_sb_info *sb_info)
{
@@ -2577,16 +1412,47 @@ void qed_int_sb_setup(struct qed_hwfn *p_hwfn,
sb_info->igu_sb_id, 0, 0);
}
-/**
- * @brief qed_get_igu_sb_id - given a sw sb_id return the
- * igu_sb_id
- *
- * @param p_hwfn
- * @param sb_id
- *
- * @return u16
- */
-static u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
+struct qed_igu_block *qed_get_igu_free_sb(struct qed_hwfn *p_hwfn, bool b_is_pf)
+{
+ struct qed_igu_block *p_block;
+ u16 igu_id;
+
+ for (igu_id = 0; igu_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev);
+ igu_id++) {
+ p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_id];
+
+ if (!(p_block->status & QED_IGU_STATUS_VALID) ||
+ !(p_block->status & QED_IGU_STATUS_FREE))
+ continue;
+
+ if (!!(p_block->status & QED_IGU_STATUS_PF) == b_is_pf)
+ return p_block;
+ }
+
+ return NULL;
+}
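
qed_get_igu_free_sb() scans the CAM shadow for the first entry that is both valid and free and whose PF/VF ownership matches the caller's request. A small standalone model of that filter over hypothetical status flags:

#include <stdio.h>
#include <stdbool.h>

#define DEMO_STATUS_FREE	0x01
#define DEMO_STATUS_VALID	0x02
#define DEMO_STATUS_PF		0x04

struct demo_block {
	unsigned char status;
};

/* Same filter as qed_get_igu_free_sb(): valid + free + matching owner */
static int demo_find_free(struct demo_block *blocks, int n, bool b_is_pf)
{
	int i;

	for (i = 0; i < n; i++) {
		if (!(blocks[i].status & DEMO_STATUS_VALID) ||
		    !(blocks[i].status & DEMO_STATUS_FREE))
			continue;
		if (!!(blocks[i].status & DEMO_STATUS_PF) == b_is_pf)
			return i;
	}
	return -1;
}

int main(void)
{
	struct demo_block blocks[] = {
		{ DEMO_STATUS_VALID | DEMO_STATUS_PF },		 /* in use */
		{ DEMO_STATUS_VALID | DEMO_STATUS_FREE },	 /* free VF */
		{ DEMO_STATUS_VALID | DEMO_STATUS_FREE | DEMO_STATUS_PF },
	};

	printf("free PF SB at index %d\n", demo_find_free(blocks, 3, true));
	printf("free VF SB at index %d\n", demo_find_free(blocks, 3, false));
	return 0;
}
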
+
+static u16 qed_get_pf_igu_sb_id(struct qed_hwfn *p_hwfn, u16 vector_id)
+{
+ struct qed_igu_block *p_block;
+ u16 igu_id;
+
+ for (igu_id = 0; igu_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev);
+ igu_id++) {
+ p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_id];
+
+ if (!(p_block->status & QED_IGU_STATUS_VALID) ||
+ !p_block->is_pf ||
+ p_block->vector_number != vector_id)
+ continue;
+
+ return igu_id;
+ }
+
+ return QED_SB_INVALID_IDX;
+}
+
+u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
{
u16 igu_sb_id;
@@ -2594,7 +1460,7 @@ static u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
if (sb_id == QED_SP_SB_ID)
igu_sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
else if (IS_PF(p_hwfn->cdev))
- igu_sb_id = sb_id + p_hwfn->hw_info.p_igu_info->igu_base_sb;
+ igu_sb_id = qed_get_pf_igu_sb_id(p_hwfn, sb_id + 1);
else
igu_sb_id = qed_vf_get_igu_sb_id(p_hwfn, sb_id);
@@ -2619,8 +1485,19 @@ int qed_int_sb_init(struct qed_hwfn *p_hwfn,
sb_info->igu_sb_id = qed_get_igu_sb_id(p_hwfn, sb_id);
if (sb_id != QED_SP_SB_ID) {
- p_hwfn->sbs_info[sb_id] = sb_info;
- p_hwfn->num_sbs++;
+ if (IS_PF(p_hwfn->cdev)) {
+ struct qed_igu_info *p_info;
+ struct qed_igu_block *p_block;
+
+ p_info = p_hwfn->hw_info.p_igu_info;
+ p_block = &p_info->entry[sb_info->igu_sb_id];
+
+ p_block->sb_info = sb_info;
+ p_block->status &= ~QED_IGU_STATUS_FREE;
+ p_info->usage.free_cnt--;
+ } else {
+ qed_vf_set_sb_info(p_hwfn, sb_id, sb_info);
+ }
}
sb_info->cdev = p_hwfn->cdev;
@@ -2649,20 +1526,35 @@ int qed_int_sb_init(struct qed_hwfn *p_hwfn,
int qed_int_sb_release(struct qed_hwfn *p_hwfn,
struct qed_sb_info *sb_info, u16 sb_id)
{
- if (sb_id == QED_SP_SB_ID) {
- DP_ERR(p_hwfn, "Do Not free sp sb using this function");
- return -EINVAL;
- }
+ struct qed_igu_block *p_block;
+ struct qed_igu_info *p_info;
+
+ if (!sb_info)
+ return 0;
/* zero status block and ack counter */
sb_info->sb_ack = 0;
memset(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));
- if (p_hwfn->sbs_info[sb_id] != NULL) {
- p_hwfn->sbs_info[sb_id] = NULL;
- p_hwfn->num_sbs--;
+ if (IS_VF(p_hwfn->cdev)) {
+ qed_vf_set_sb_info(p_hwfn, sb_id, NULL);
+ return 0;
}
+ p_info = p_hwfn->hw_info.p_igu_info;
+ p_block = &p_info->entry[sb_info->igu_sb_id];
+
+ /* Vector 0 is reserved for the Default SB */
+ if (!p_block->vector_number) {
+ DP_ERR(p_hwfn, "Do Not free sp sb using this function");
+ return -EINVAL;
+ }
+
+ /* Lose reference to client's SB info, and fix counters */
+ p_block->sb_info = NULL;
+ p_block->status |= QED_IGU_STATUS_FREE;
+ p_info->usage.free_cnt++;
+
return 0;
}
@@ -2679,6 +1571,7 @@ static void qed_int_sp_sb_free(struct qed_hwfn *p_hwfn)
p_sb->sb_info.sb_virt,
p_sb->sb_info.sb_phys);
kfree(p_sb);
+ p_hwfn->p_sp_sb = NULL;
}
static int qed_int_sp_sb_alloc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
@@ -2780,10 +1673,9 @@ void qed_int_igu_enable_int(struct qed_hwfn *p_hwfn,
qed_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, igu_pf_conf);
}
-int qed_int_igu_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
- enum qed_int_mode int_mode)
+static void qed_int_igu_enable_attn(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
{
- int rc = 0;
/* Configure AEU signal change to produce attentions */
qed_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0);
@@ -2796,6 +1688,16 @@ int qed_int_igu_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
/* Unmask AEU signals toward IGU */
qed_wr(p_hwfn, p_ptt, MISC_REG_AEU_MASK_ATTN_IGU, 0xff);
+}
+
+int
+qed_int_igu_enable(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, enum qed_int_mode int_mode)
+{
+ int rc = 0;
+
+ qed_int_igu_enable_attn(p_hwfn, p_ptt);
+
if ((int_mode != QED_INT_MODE_INTA) || IS_LEAD_HWFN(p_hwfn)) {
rc = qed_slowpath_irq_req(p_hwfn);
if (rc) {
@@ -2824,10 +1726,11 @@ void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
#define IGU_CLEANUP_SLEEP_LENGTH (1000)
static void qed_int_igu_cleanup_sb(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
- u32 sb_id, bool cleanup_set, u16 opaque_fid)
+ u16 igu_sb_id,
+ bool cleanup_set, u16 opaque_fid)
{
u32 cmd_ctrl = 0, val = 0, sb_bit = 0, sb_bit_addr = 0, data = 0;
- u32 pxp_addr = IGU_CMD_INT_ACK_BASE + sb_id;
+ u32 pxp_addr = IGU_CMD_INT_ACK_BASE + igu_sb_id;
u32 sleep_cnt = IGU_CLEANUP_SLEEP_LENGTH;
/* Set the data field */
@@ -2850,8 +1753,8 @@ static void qed_int_igu_cleanup_sb(struct qed_hwfn *p_hwfn,
mmiowb();
/* calculate where to read the status bit from */
- sb_bit = 1 << (sb_id % 32);
- sb_bit_addr = sb_id / 32 * sizeof(u32);
+ sb_bit = 1 << (igu_sb_id % 32);
+ sb_bit_addr = igu_sb_id / 32 * sizeof(u32);
sb_bit_addr += IGU_REG_CLEANUP_STATUS_0;
@@ -2868,29 +1771,38 @@ static void qed_int_igu_cleanup_sb(struct qed_hwfn *p_hwfn,
if (!sleep_cnt)
DP_NOTICE(p_hwfn,
"Timeout waiting for clear status 0x%08x [for sb %d]\n",
- val, sb_id);
+ val, igu_sb_id);
}
void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
- u32 sb_id, u16 opaque, bool b_set)
+ u16 igu_sb_id, u16 opaque, bool b_set)
{
+ struct qed_igu_block *p_block;
int pi, i;
+ p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_sb_id];
+ DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
+ "Cleaning SB [%04x]: func_id= %d is_pf = %d vector_num = 0x%0x\n",
+ igu_sb_id,
+ p_block->function_id,
+ p_block->is_pf, p_block->vector_number);
+
/* Set */
if (b_set)
- qed_int_igu_cleanup_sb(p_hwfn, p_ptt, sb_id, 1, opaque);
+ qed_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 1, opaque);
/* Clear */
- qed_int_igu_cleanup_sb(p_hwfn, p_ptt, sb_id, 0, opaque);
+ qed_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 0, opaque);
/* Wait for the IGU SB to cleanup */
for (i = 0; i < IGU_CLEANUP_SLEEP_LENGTH; i++) {
u32 val;
val = qed_rd(p_hwfn, p_ptt,
- IGU_REG_WRITE_DONE_PENDING + ((sb_id / 32) * 4));
- if (val & (1 << (sb_id % 32)))
+ IGU_REG_WRITE_DONE_PENDING +
+ ((igu_sb_id / 32) * 4));
+ if (val & BIT((igu_sb_id % 32)))
usleep_range(10, 20);
else
break;
@@ -2898,84 +1810,205 @@ void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn,
if (i == IGU_CLEANUP_SLEEP_LENGTH)
DP_NOTICE(p_hwfn,
"Failed SB[0x%08x] still appearing in WRITE_DONE_PENDING\n",
- sb_id);
+ igu_sb_id);
/* Clear the CAU for the SB */
for (pi = 0; pi < 12; pi++)
qed_wr(p_hwfn, p_ptt,
- CAU_REG_PI_MEMORY + (sb_id * 12 + pi) * 4, 0);
+ CAU_REG_PI_MEMORY + (igu_sb_id * 12 + pi) * 4, 0);
}
void qed_int_igu_init_pure_rt(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
bool b_set, bool b_slowpath)
{
- u32 igu_base_sb = p_hwfn->hw_info.p_igu_info->igu_base_sb;
- u32 igu_sb_cnt = p_hwfn->hw_info.p_igu_info->igu_sb_cnt;
- u32 sb_id = 0, val = 0;
+ struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
+ struct qed_igu_block *p_block;
+ u16 igu_sb_id = 0;
+ u32 val = 0;
val = qed_rd(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION);
val |= IGU_REG_BLOCK_CONFIGURATION_VF_CLEANUP_EN;
val &= ~IGU_REG_BLOCK_CONFIGURATION_PXP_TPH_INTERFACE_EN;
qed_wr(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION, val);
- DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
- "IGU cleaning SBs [%d,...,%d]\n",
- igu_base_sb, igu_base_sb + igu_sb_cnt - 1);
+ for (igu_sb_id = 0;
+ igu_sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); igu_sb_id++) {
+ p_block = &p_info->entry[igu_sb_id];
- for (sb_id = igu_base_sb; sb_id < igu_base_sb + igu_sb_cnt; sb_id++)
- qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, sb_id,
+ if (!(p_block->status & QED_IGU_STATUS_VALID) ||
+ !p_block->is_pf ||
+ (p_block->status & QED_IGU_STATUS_DSB))
+ continue;
+
+ qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, igu_sb_id,
p_hwfn->hw_info.opaque_fid,
b_set);
+ }
- if (!b_slowpath)
- return;
-
- sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
- DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
- "IGU cleaning slowpath SB [%d]\n", sb_id);
- qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, sb_id,
- p_hwfn->hw_info.opaque_fid, b_set);
+ if (b_slowpath)
+ qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
+ p_info->igu_dsb_id,
+ p_hwfn->hw_info.opaque_fid,
+ b_set);
}
-static u32 qed_int_igu_read_cam_block(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt, u16 sb_id)
+int qed_int_igu_reset_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
- u32 val = qed_rd(p_hwfn, p_ptt,
- IGU_REG_MAPPING_MEMORY + sizeof(u32) * sb_id);
+ struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
struct qed_igu_block *p_block;
+ int pf_sbs, vf_sbs;
+ u16 igu_sb_id;
+ u32 val, rval;
- p_block = &p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks[sb_id];
+ if (!RESC_NUM(p_hwfn, QED_SB)) {
+ p_info->b_allow_pf_vf_change = false;
+ } else {
+ /* Use the numbers the MFW has provided -
+ * don't forget MFW accounts for the default SB as well.
+ */
+ p_info->b_allow_pf_vf_change = true;
+
+ if (p_info->usage.cnt != RESC_NUM(p_hwfn, QED_SB) - 1) {
+ DP_INFO(p_hwfn,
+ "MFW notifies of 0x%04x PF SBs; IGU indicates of only 0x%04x\n",
+ RESC_NUM(p_hwfn, QED_SB) - 1,
+ p_info->usage.cnt);
+ p_info->usage.cnt = RESC_NUM(p_hwfn, QED_SB) - 1;
+ }
- /* stop scanning when hit first invalid PF entry */
- if (!GET_FIELD(val, IGU_MAPPING_LINE_VALID) &&
- GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID))
- goto out;
+ if (IS_PF_SRIOV(p_hwfn)) {
+ u16 vfs = p_hwfn->cdev->p_iov_info->total_vfs;
- /* Fill the block information */
- p_block->status = QED_IGU_STATUS_VALID;
- p_block->function_id = GET_FIELD(val,
- IGU_MAPPING_LINE_FUNCTION_NUMBER);
- p_block->is_pf = GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID);
- p_block->vector_number = GET_FIELD(val,
- IGU_MAPPING_LINE_VECTOR_NUMBER);
+ if (vfs != p_info->usage.iov_cnt)
+ DP_VERBOSE(p_hwfn,
+ NETIF_MSG_INTR,
+ "0x%04x VF SBs in IGU CAM != PCI configuration 0x%04x\n",
+ p_info->usage.iov_cnt, vfs);
- DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
- "IGU_BLOCK: [SB 0x%04x, Value in CAM 0x%08x] func_id = %d is_pf = %d vector_num = 0x%x\n",
- sb_id, val, p_block->function_id,
- p_block->is_pf, p_block->vector_number);
+ /* At this point we know the total number of SBs in the IGU
+ * and the number of PF SBs, so we can validate that enough
+ * would be left for the VFs.
+ */
+ if (vfs > p_info->usage.free_cnt +
+ p_info->usage.free_cnt_iov - p_info->usage.cnt) {
+ DP_NOTICE(p_hwfn,
+ "Not enough SBs for VFs - 0x%04x SBs, from which %04x PFs and %04x are required\n",
+ p_info->usage.free_cnt +
+ p_info->usage.free_cnt_iov,
+ p_info->usage.cnt, vfs);
+ return -EINVAL;
+ }
-out:
- return val;
+ /* Currently cap the number of VF SBs by the
+ * number of VFs.
+ */
+ p_info->usage.iov_cnt = vfs;
+ }
+ }
+
+ /* Mark all SBs as free, now in the right PF/VFs division */
+ p_info->usage.free_cnt = p_info->usage.cnt;
+ p_info->usage.free_cnt_iov = p_info->usage.iov_cnt;
+ p_info->usage.orig = p_info->usage.cnt;
+ p_info->usage.iov_orig = p_info->usage.iov_cnt;
+
+ /* We now proceed to re-configure the IGU CAM to reflect the initial
+ * configuration. We can start with the Default SB.
+ */
+ pf_sbs = p_info->usage.cnt;
+ vf_sbs = p_info->usage.iov_cnt;
+
+ for (igu_sb_id = p_info->igu_dsb_id;
+ igu_sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); igu_sb_id++) {
+ p_block = &p_info->entry[igu_sb_id];
+ val = 0;
+
+ if (!(p_block->status & QED_IGU_STATUS_VALID))
+ continue;
+
+ if (p_block->status & QED_IGU_STATUS_DSB) {
+ p_block->function_id = p_hwfn->rel_pf_id;
+ p_block->is_pf = 1;
+ p_block->vector_number = 0;
+ p_block->status = QED_IGU_STATUS_VALID |
+ QED_IGU_STATUS_PF |
+ QED_IGU_STATUS_DSB;
+ } else if (pf_sbs) {
+ pf_sbs--;
+ p_block->function_id = p_hwfn->rel_pf_id;
+ p_block->is_pf = 1;
+ p_block->vector_number = p_info->usage.cnt - pf_sbs;
+ p_block->status = QED_IGU_STATUS_VALID |
+ QED_IGU_STATUS_PF |
+ QED_IGU_STATUS_FREE;
+ } else if (vf_sbs) {
+ p_block->function_id =
+ p_hwfn->cdev->p_iov_info->first_vf_in_pf +
+ p_info->usage.iov_cnt - vf_sbs;
+ p_block->is_pf = 0;
+ p_block->vector_number = 0;
+ p_block->status = QED_IGU_STATUS_VALID |
+ QED_IGU_STATUS_FREE;
+ vf_sbs--;
+ } else {
+ p_block->function_id = 0;
+ p_block->is_pf = 0;
+ p_block->vector_number = 0;
+ }
+
+ SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER,
+ p_block->function_id);
+ SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, p_block->is_pf);
+ SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER,
+ p_block->vector_number);
+
+ /* VF entries would be enabled when VF is initialized */
+ SET_FIELD(val, IGU_MAPPING_LINE_VALID, p_block->is_pf);
+
+ rval = qed_rd(p_hwfn, p_ptt,
+ IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_sb_id);
+
+ if (rval != val) {
+ qed_wr(p_hwfn, p_ptt,
+ IGU_REG_MAPPING_MEMORY +
+ sizeof(u32) * igu_sb_id, val);
+
+ DP_VERBOSE(p_hwfn,
+ NETIF_MSG_INTR,
+ "IGU reset: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x [%08x -> %08x]\n",
+ igu_sb_id,
+ p_block->function_id,
+ p_block->is_pf,
+ p_block->vector_number, rval, val);
+ }
+ }
+
+ return 0;
+}
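
The reassignment loop in qed_int_igu_reset_cam() hands out CAM entries in a fixed order: the default SB keeps vector 0, the next pf_sbs entries get this PF's function id with ascending vector numbers, and the following vf_sbs entries go to consecutive VF function ids with vector 0. A standalone model of that assignment, with made-up counts and ids:

#include <stdio.h>

/* Standalone model of the entry assignment in qed_int_igu_reset_cam();
 * the counts and function ids are invented for the demo.
 */
int main(void)
{
	int pf_id = 0, first_vf = 8;
	int cnt = 3, iov_cnt = 2;		/* PF SBs, VF SBs */
	int pf_sbs = cnt, vf_sbs = iov_cnt;
	int igu_sb_id, total = 1 + cnt + iov_cnt;

	for (igu_sb_id = 0; igu_sb_id < total; igu_sb_id++) {
		int func, is_pf, vector;

		if (igu_sb_id == 0) {		/* default SB keeps vector 0 */
			func = pf_id;
			is_pf = 1;
			vector = 0;
		} else if (pf_sbs) {		/* PF SBs: vectors 1..cnt */
			pf_sbs--;
			func = pf_id;
			is_pf = 1;
			vector = cnt - pf_sbs;
		} else {			/* VF SBs: consecutive VFs */
			func = first_vf + iov_cnt - vf_sbs;
			is_pf = 0;
			vector = 0;
			vf_sbs--;
		}
		printf("SB %d: func %d is_pf %d vector %d\n",
		       igu_sb_id, func, is_pf, vector);
	}
	return 0;
}
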
+
+static void qed_int_igu_read_cam_block(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u16 igu_sb_id)
+{
+ u32 val = qed_rd(p_hwfn, p_ptt,
+ IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_sb_id);
+ struct qed_igu_block *p_block;
+
+ p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_sb_id];
+
+ /* Fill the block information */
+ p_block->function_id = GET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER);
+ p_block->is_pf = GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID);
+ p_block->vector_number = GET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER);
+ p_block->igu_sb_id = igu_sb_id;
}
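
Both the CAM read above and the CAM write in qed_int_igu_reset_cam() move between a packed 32-bit mapping line and its fields via the driver's GET_FIELD()/SET_FIELD() mask-and-shift macros. A sketch of that packing with assumed field positions; the real IGU_MAPPING_LINE_* masks and shifts come from the firmware HSI headers:

#include <stdio.h>
#include <stdint.h>

/* Assumed layout for the demo only */
#define DEMO_FUNC_MASK		0xff
#define DEMO_FUNC_SHIFT		0
#define DEMO_PF_VALID_MASK	0x1
#define DEMO_PF_VALID_SHIFT	8
#define DEMO_VECTOR_MASK	0xff
#define DEMO_VECTOR_SHIFT	9
#define DEMO_VALID_MASK		0x1
#define DEMO_VALID_SHIFT	17

/* Simplified set (assumes the field starts zeroed) and get */
#define DEMO_SET_FIELD(val, name, field)				\
	((val) |= ((uint32_t)(field) & name##_MASK) << name##_SHIFT)
#define DEMO_GET_FIELD(val, name)					\
	(((val) >> name##_SHIFT) & name##_MASK)

int main(void)
{
	uint32_t line = 0;

	/* Build a CAM line the way qed_int_igu_reset_cam() does */
	DEMO_SET_FIELD(line, DEMO_FUNC, 3);
	DEMO_SET_FIELD(line, DEMO_PF_VALID, 1);
	DEMO_SET_FIELD(line, DEMO_VECTOR, 5);
	DEMO_SET_FIELD(line, DEMO_VALID, 1);

	/* ...and decode it as qed_int_igu_read_cam_block() does */
	printf("func %u is_pf %u vector %u valid %u [line %08x]\n",
	       (unsigned)DEMO_GET_FIELD(line, DEMO_FUNC),
	       (unsigned)DEMO_GET_FIELD(line, DEMO_PF_VALID),
	       (unsigned)DEMO_GET_FIELD(line, DEMO_VECTOR),
	       (unsigned)DEMO_GET_FIELD(line, DEMO_VALID));
	return 0;
}
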
int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
struct qed_igu_info *p_igu_info;
- u32 val, min_vf = 0, max_vf = 0;
- u16 sb_id, last_iov_sb_id = 0;
- struct qed_igu_block *blk;
- u16 prev_sb_id = 0xFF;
+ struct qed_igu_block *p_block;
+ u32 min_vf = 0, max_vf = 0;
+ u16 igu_sb_id;
p_hwfn->hw_info.p_igu_info = kzalloc(sizeof(*p_igu_info), GFP_KERNEL);
if (!p_hwfn->hw_info.p_igu_info)
@@ -2983,12 +2016,10 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
p_igu_info = p_hwfn->hw_info.p_igu_info;
- /* Initialize base sb / sb cnt for PFs and VFs */
- p_igu_info->igu_base_sb = 0xffff;
- p_igu_info->igu_sb_cnt = 0;
- p_igu_info->igu_dsb_id = 0xffff;
- p_igu_info->igu_base_sb_iov = 0xffff;
+ /* Distinguish between existent and non-existent default SB */
+ p_igu_info->igu_dsb_id = QED_SB_INVALID_IDX;
+ /* Find the range of VF ids whose SB belong to this PF */
if (p_hwfn->cdev->p_iov_info) {
struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
@@ -2996,113 +2027,69 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
max_vf = p_iov->first_vf_in_pf + p_iov->total_vfs;
}
- for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev);
- sb_id++) {
- blk = &p_igu_info->igu_map.igu_blocks[sb_id];
-
- val = qed_int_igu_read_cam_block(p_hwfn, p_ptt, sb_id);
-
- /* stop scanning when hit first invalid PF entry */
- if (!GET_FIELD(val, IGU_MAPPING_LINE_VALID) &&
- GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID))
- break;
-
- if (blk->is_pf) {
- if (blk->function_id == p_hwfn->rel_pf_id) {
- blk->status |= QED_IGU_STATUS_PF;
-
- if (blk->vector_number == 0) {
- if (p_igu_info->igu_dsb_id == 0xffff)
- p_igu_info->igu_dsb_id = sb_id;
- } else {
- if (p_igu_info->igu_base_sb ==
- 0xffff) {
- p_igu_info->igu_base_sb = sb_id;
- } else if (prev_sb_id != sb_id - 1) {
- DP_NOTICE(p_hwfn->cdev,
- "consecutive igu vectors for HWFN %x broken",
- p_hwfn->rel_pf_id);
- break;
- }
- prev_sb_id = sb_id;
- /* we don't count the default */
- (p_igu_info->igu_sb_cnt)++;
- }
- }
- } else {
- if ((blk->function_id >= min_vf) &&
- (blk->function_id < max_vf)) {
- /* Available for VFs of this PF */
- if (p_igu_info->igu_base_sb_iov == 0xffff) {
- p_igu_info->igu_base_sb_iov = sb_id;
- } else if (last_iov_sb_id != sb_id - 1) {
- if (!val) {
- DP_VERBOSE(p_hwfn->cdev,
- NETIF_MSG_INTR,
- "First uninitialized IGU CAM entry at index 0x%04x\n",
- sb_id);
- } else {
- DP_NOTICE(p_hwfn->cdev,
- "Consecutive igu vectors for HWFN %x vfs is broken [jumps from %04x to %04x]\n",
- p_hwfn->rel_pf_id,
- last_iov_sb_id,
- sb_id); }
- break;
- }
- blk->status |= QED_IGU_STATUS_FREE;
- p_hwfn->hw_info.p_igu_info->free_blks++;
- last_iov_sb_id = sb_id;
- }
+ for (igu_sb_id = 0;
+ igu_sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); igu_sb_id++) {
+ /* Read current entry; Notice it might not belong to this PF */
+ qed_int_igu_read_cam_block(p_hwfn, p_ptt, igu_sb_id);
+ p_block = &p_igu_info->entry[igu_sb_id];
+
+ if ((p_block->is_pf) &&
+ (p_block->function_id == p_hwfn->rel_pf_id)) {
+ p_block->status = QED_IGU_STATUS_PF |
+ QED_IGU_STATUS_VALID |
+ QED_IGU_STATUS_FREE;
+
+ if (p_igu_info->igu_dsb_id != QED_SB_INVALID_IDX)
+ p_igu_info->usage.cnt++;
+ } else if (!(p_block->is_pf) &&
+ (p_block->function_id >= min_vf) &&
+ (p_block->function_id < max_vf)) {
+ /* Available for VFs of this PF */
+ p_block->status = QED_IGU_STATUS_VALID |
+ QED_IGU_STATUS_FREE;
+
+ if (p_igu_info->igu_dsb_id != QED_SB_INVALID_IDX)
+ p_igu_info->usage.iov_cnt++;
}
- }
- /* There's a possibility the igu_sb_cnt_iov doesn't properly reflect
- * the number of VF SBs [especially for first VF on engine, as we can't
- * differentiate between empty entries and its entries].
- * Since we don't really support more SBs than VFs today, prevent any
- * such configuration by sanitizing the number of SBs to equal the
- * number of VFs.
- */
- if (IS_PF_SRIOV(p_hwfn)) {
- u16 total_vfs = p_hwfn->cdev->p_iov_info->total_vfs;
+ /* Mark the first entry belonging to the PF or its VFs
+ * as the default SB [we'll reset IGU prior to first usage].
+ */
+ if ((p_block->status & QED_IGU_STATUS_VALID) &&
+ (p_igu_info->igu_dsb_id == QED_SB_INVALID_IDX)) {
+ p_igu_info->igu_dsb_id = igu_sb_id;
+ p_block->status |= QED_IGU_STATUS_DSB;
+ }
- if (total_vfs < p_igu_info->free_blks) {
- DP_VERBOSE(p_hwfn,
- (NETIF_MSG_INTR | QED_MSG_IOV),
- "Limiting number of SBs for IOV - %04x --> %04x\n",
- p_igu_info->free_blks,
- p_hwfn->cdev->p_iov_info->total_vfs);
- p_igu_info->free_blks = total_vfs;
- } else if (total_vfs > p_igu_info->free_blks) {
- DP_NOTICE(p_hwfn,
- "IGU has only %04x SBs for VFs while the device has %04x VFs\n",
- p_igu_info->free_blks, total_vfs);
- return -EINVAL;
+ /* Limit the number of prints by having each PF print only its
+ * own entries, with the exception of PF0, which prints
+ * everything.
+ */
+ if ((p_block->status & QED_IGU_STATUS_VALID) ||
+ (p_hwfn->abs_pf_id == 0)) {
+ DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
+ "IGU_BLOCK: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x\n",
+ igu_sb_id, p_block->function_id,
+ p_block->is_pf, p_block->vector_number);
}
}
- p_igu_info->igu_sb_cnt_iov = p_igu_info->free_blks;
-
- DP_VERBOSE(
- p_hwfn,
- NETIF_MSG_INTR,
- "IGU igu_base_sb=0x%x [IOV 0x%x] igu_sb_cnt=%d [IOV 0x%x] igu_dsb_id=0x%x\n",
- p_igu_info->igu_base_sb,
- p_igu_info->igu_base_sb_iov,
- p_igu_info->igu_sb_cnt,
- p_igu_info->igu_sb_cnt_iov,
- p_igu_info->igu_dsb_id);
-
- if (p_igu_info->igu_base_sb == 0xffff ||
- p_igu_info->igu_dsb_id == 0xffff ||
- p_igu_info->igu_sb_cnt == 0) {
+
+ if (p_igu_info->igu_dsb_id == QED_SB_INVALID_IDX) {
DP_NOTICE(p_hwfn,
- "IGU CAM returned invalid values igu_base_sb=0x%x igu_sb_cnt=%d igu_dsb_id=0x%x\n",
- p_igu_info->igu_base_sb,
- p_igu_info->igu_sb_cnt,
- p_igu_info->igu_dsb_id);
+ "IGU CAM returned invalid values igu_dsb_id=0x%x\n",
+ p_igu_info->igu_dsb_id);
return -EINVAL;
}
+ /* All non-default SBs are considered free at this point */
+ p_igu_info->usage.free_cnt = p_igu_info->usage.cnt;
+ p_igu_info->usage.free_cnt_iov = p_igu_info->usage.iov_cnt;
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
+ "igu_dsb_id=0x%x, num Free SBs - PF: %04x VF: %04x [might change after resource allocation]\n",
+ p_igu_info->igu_dsb_id,
+ p_igu_info->usage.cnt, p_igu_info->usage.iov_cnt);
+
return 0;
}
@@ -3157,6 +2144,7 @@ static int qed_int_sp_dpc_alloc(struct qed_hwfn *p_hwfn)
static void qed_int_sp_dpc_free(struct qed_hwfn *p_hwfn)
{
kfree(p_hwfn->sp_dpc);
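+ /* Clear the dangling pointer so a repeated teardown is a no-op. */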
+ p_hwfn->sp_dpc = NULL;
}
int qed_int_alloc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
@@ -3198,31 +2186,7 @@ void qed_int_get_num_sbs(struct qed_hwfn *p_hwfn,
if (!info || !p_sb_cnt_info)
return;
- p_sb_cnt_info->sb_cnt = info->igu_sb_cnt;
- p_sb_cnt_info->sb_iov_cnt = info->igu_sb_cnt_iov;
- p_sb_cnt_info->sb_free_blk = info->free_blks;
-}
-
-u16 qed_int_queue_id_from_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
-{
- struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
-
- /* Determine origin of SB id */
- if ((sb_id >= p_info->igu_base_sb) &&
- (sb_id < p_info->igu_base_sb + p_info->igu_sb_cnt)) {
- return sb_id - p_info->igu_base_sb;
- } else if ((sb_id >= p_info->igu_base_sb_iov) &&
- (sb_id < p_info->igu_base_sb_iov + p_info->igu_sb_cnt_iov)) {
- /* We want the first VF queue to be adjacent to the
- * last PF queue. Since L2 queues can be partial to
- * SBs, we'll use the feature instead.
- */
- return sb_id - p_info->igu_base_sb_iov +
- FEAT_NUM(p_hwfn, QED_PF_L2_QUE);
- } else {
- DP_NOTICE(p_hwfn, "SB %d not in range for function\n", sb_id);
- return 0;
- }
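+ /* info->usage is a struct qed_sb_cnt_info, so it can be copied
+ * into the caller's buffer wholesale.
+ */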
+ memcpy(p_sb_cnt_info, &info->usage, sizeof(*p_sb_cnt_info));
}
void qed_int_disable_post_isr_release(struct qed_dev *cdev)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h
index 0ae0bb4593ef..5199634ed630 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.h
@@ -79,24 +79,6 @@ enum qed_coalescing_fsm {
};
/**
- * @brief qed_int_cau_conf_pi - configure cau for a given
- * status block
- *
- * @param p_hwfn
- * @param p_ptt
- * @param igu_sb_id
- * @param pi_index
- * @param state
- * @param timeset
- */
-void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u16 igu_sb_id,
- u32 pi_index,
- enum qed_coalescing_fsm coalescing_fsm,
- u8 timeset);
-
-/**
* @brief qed_int_igu_enable_int - enable device interrupts
*
* @param p_hwfn
@@ -217,32 +199,63 @@ void qed_int_disable_post_isr_release(struct qed_dev *cdev);
#define SB_ALIGNED_SIZE(p_hwfn) \
ALIGNED_TYPE_SIZE(struct status_block, p_hwfn)
+#define QED_SB_INVALID_IDX 0xffff
+
struct qed_igu_block {
- u8 status;
+ u8 status;
#define QED_IGU_STATUS_FREE 0x01
#define QED_IGU_STATUS_VALID 0x02
#define QED_IGU_STATUS_PF 0x04
+#define QED_IGU_STATUS_DSB 0x08
- u8 vector_number;
- u8 function_id;
- u8 is_pf;
-};
+ u8 vector_number;
+ u8 function_id;
+ u8 is_pf;
+
+ /* Index inside IGU [meant for back reference] */
+ u16 igu_sb_id;
-struct qed_igu_map {
- struct qed_igu_block igu_blocks[MAX_TOT_SB_PER_PATH];
+ struct qed_sb_info *sb_info;
};
struct qed_igu_info {
- struct qed_igu_map igu_map;
- u16 igu_dsb_id;
- u16 igu_base_sb;
- u16 igu_base_sb_iov;
- u16 igu_sb_cnt;
- u16 igu_sb_cnt_iov;
- u16 free_blks;
+ struct qed_igu_block entry[MAX_TOT_SB_PER_PATH];
+ u16 igu_dsb_id;
+
+ struct qed_sb_cnt_info usage;
+
+ bool b_allow_pf_vf_change;
};
-/* TODO Names of function may change... */
+/**
+ * @brief - Make sure the IGU CAM reflects the resources provided by MFW
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+int qed_int_igu_reset_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+
+/**
+ * @brief Translate the weakly-defined client sb-id into an IGU sb-id
+ *
+ * @param p_hwfn
+ * @param sb_id - user provided sb_id
+ *
+ * @return an index inside IGU CAM where the SB resides
+ */
+u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id);
+
+/**
+ * @brief return a pointer to an unused valid SB
+ *
+ * @param p_hwfn
+ * @param b_is_pf - true iff we want a SB belonging to a PF
+ *
+ * @return pointer to an igu_block, NULL if none is available
+ */
+struct qed_igu_block *qed_get_igu_free_sb(struct qed_hwfn *p_hwfn,
+ bool b_is_pf);
+
void qed_int_igu_init_pure_rt(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
bool b_set,
@@ -321,13 +334,13 @@ u16 qed_int_get_sp_sb_id(struct qed_hwfn *p_hwfn);
*
* @param p_hwfn
* @param p_ptt
- * @param sb_id - igu status block id
+ * @param igu_sb_id - igu status block id
* @param opaque - opaque fid of the sb owner.
* @param b_set - set(1) / clear(0)
*/
void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
- u32 sb_id,
+ u16 igu_sb_id,
u16 opaque,
bool b_set);
@@ -377,16 +390,6 @@ void qed_int_setup(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
/**
- * @brief - Returns an Rx queue index appropriate for usage with given SB.
- *
- * @param p_hwfn
- * @param sb_id - absolute index of SB
- *
- * @return index of Rx queue
- */
-u16 qed_int_queue_id_from_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id);
-
-/**
* @brief - Enable Interrupt & Attention for hw function
*
* @param p_hwfn
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
index 339c91dfa658..813c77cc857f 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
@@ -44,7 +44,6 @@
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/string.h>
-#include <linux/version.h>
#include <linux/workqueue.h>
#include <linux/errno.h>
#include <linux/list.h>
@@ -63,6 +62,22 @@
#include "qed_sriov.h"
#include "qed_reg_addr.h"
+static int
+qed_iscsi_async_event(struct qed_hwfn *p_hwfn,
+ u8 fw_event_code,
+ u16 echo, union event_ring_data *data, u8 fw_return_code)
+{
+ if (p_hwfn->p_iscsi_info->event_cb) {
+ struct qed_iscsi_info *p_iscsi = p_hwfn->p_iscsi_info;
+
+ return p_iscsi->event_cb(p_iscsi->event_context,
+ fw_event_code, data);
+ } else {
+ DP_NOTICE(p_hwfn, "iSCSI async completion is not set\n");
+ return -EINVAL;
+ }
+}
+
struct qed_iscsi_conn {
struct list_head list_entry;
bool free_on_delete;
@@ -186,7 +201,7 @@ qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn,
DP_ERR(p_hwfn,
"Cannot satisfy CQ amount. Queues requested %d, CQs available %d. Aborting function start\n",
p_params->num_queues,
- p_hwfn->hw_info.resc_num[QED_ISCSI_CQ]);
+ p_hwfn->hw_info.feat_num[QED_ISCSI_CQ]);
return -EINVAL;
}
@@ -221,7 +236,7 @@ qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn,
p_queue->cmdq_sb_pi = p_params->gl_cmd_pi;
for (i = 0; i < p_params->num_queues; i++) {
- val = p_hwfn->sbs_info[i]->igu_sb_id;
+ val = qed_get_igu_sb_id(p_hwfn, i);
p_queue->cq_cmdq_sb_num_arr[i] = cpu_to_le16(val);
}
@@ -266,6 +281,9 @@ qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn,
p_hwfn->p_iscsi_info->event_context = event_context;
p_hwfn->p_iscsi_info->event_cb = async_event_cb;
+ qed_spq_register_async_cb(p_hwfn, PROTOCOLID_ISCSI,
+ qed_iscsi_async_event);
+
return qed_spq_post(p_hwfn, p_ent, NULL);
}
@@ -375,7 +393,6 @@ static int qed_sp_iscsi_conn_offload(struct qed_hwfn *p_hwfn,
p_tcp->ss_thresh = cpu_to_le32(p_conn->ss_thresh);
p_tcp->srtt = cpu_to_le16(p_conn->srtt);
p_tcp->rtt_var = cpu_to_le16(p_conn->rtt_var);
- p_tcp->ts_time = cpu_to_le32(p_conn->ts_time);
p_tcp->ts_recent = cpu_to_le32(p_conn->ts_recent);
p_tcp->ts_recent_age = cpu_to_le32(p_conn->ts_recent_age);
p_tcp->total_rt = cpu_to_le32(p_conn->total_rt);
@@ -400,8 +417,6 @@ static int qed_sp_iscsi_conn_offload(struct qed_hwfn *p_hwfn,
p_tcp->mss = cpu_to_le16(p_conn->mss);
p_tcp->snd_wnd_scale = p_conn->snd_wnd_scale;
p_tcp->rcv_wnd_scale = p_conn->rcv_wnd_scale;
- dval = p_conn->ts_ticks_per_second;
- p_tcp->ts_ticks_per_second = cpu_to_le32(dval);
wval = p_conn->da_timeout_value;
p_tcp->da_timeout_value = cpu_to_le16(wval);
p_tcp->ack_frequency = p_conn->ack_frequency;
@@ -492,6 +507,54 @@ static int qed_sp_iscsi_conn_update(struct qed_hwfn *p_hwfn,
return qed_spq_post(p_hwfn, p_ent, NULL);
}
+static int
+qed_sp_iscsi_mac_update(struct qed_hwfn *p_hwfn,
+ struct qed_iscsi_conn *p_conn,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_addr)
+{
+ struct iscsi_spe_conn_mac_update *p_ramrod = NULL;
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ int rc = -EINVAL;
+ u8 ucval;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = p_conn->icid;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = comp_mode;
+ init_data.p_comp_data = p_comp_addr;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ ISCSI_RAMROD_CMD_ID_MAC_UPDATE,
+ PROTOCOLID_ISCSI, &init_data);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.iscsi_conn_mac_update;
+ p_ramrod->hdr.op_code = ISCSI_RAMROD_CMD_ID_MAC_UPDATE;
+ SET_FIELD(p_ramrod->hdr.flags,
+ ISCSI_SLOW_PATH_HDR_LAYER_CODE, p_conn->layer_code);
+
+ p_ramrod->conn_id = cpu_to_le16(p_conn->conn_id);
+ p_ramrod->fw_cid = cpu_to_le32(p_conn->icid);
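+ /* The ramrod holds the remote MAC as three 16-bit words
+ * (hi/mid/lo); within each word the two bytes are swapped
+ * relative to their order in the MAC address.
+ */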
+ ucval = p_conn->remote_mac[1];
+ ((u8 *)(&p_ramrod->remote_mac_addr_hi))[0] = ucval;
+ ucval = p_conn->remote_mac[0];
+ ((u8 *)(&p_ramrod->remote_mac_addr_hi))[1] = ucval;
+ ucval = p_conn->remote_mac[3];
+ ((u8 *)(&p_ramrod->remote_mac_addr_mid))[0] = ucval;
+ ucval = p_conn->remote_mac[2];
+ ((u8 *)(&p_ramrod->remote_mac_addr_mid))[1] = ucval;
+ ucval = p_conn->remote_mac[5];
+ ((u8 *)(&p_ramrod->remote_mac_addr_lo))[0] = ucval;
+ ucval = p_conn->remote_mac[4];
+ ((u8 *)(&p_ramrod->remote_mac_addr_lo))[1] = ucval;
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
static int qed_sp_iscsi_conn_terminate(struct qed_hwfn *p_hwfn,
struct qed_iscsi_conn *p_conn,
enum spq_mode comp_mode,
@@ -587,7 +650,10 @@ static int qed_sp_iscsi_func_stop(struct qed_hwfn *p_hwfn,
p_ramrod = &p_ent->ramrod.iscsi_destroy;
p_ramrod->hdr.op_code = ISCSI_RAMROD_CMD_ID_DESTROY_FUNC;
- return qed_spq_post(p_hwfn, p_ent, NULL);
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+
+ qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_ISCSI);
+ return rc;
}
static void __iomem *qed_iscsi_get_db_addr(struct qed_hwfn *p_hwfn, u32 cid)
@@ -708,7 +774,7 @@ static int qed_iscsi_allocate_connection(struct qed_hwfn *p_hwfn,
QED_CHAIN_USE_TO_CONSUME_PRODUCE,
QED_CHAIN_MODE_PBL,
QED_CHAIN_CNT_TYPE_U16,
- r2tq_num_elements, 0x80, &p_conn->r2tq);
+ r2tq_num_elements, 0x80, &p_conn->r2tq, NULL);
if (rc)
goto nomem_r2tq;
@@ -719,7 +785,7 @@ static int qed_iscsi_allocate_connection(struct qed_hwfn *p_hwfn,
QED_CHAIN_MODE_PBL,
QED_CHAIN_CNT_TYPE_U16,
uhq_num_elements,
- sizeof(struct iscsi_uhqe), &p_conn->uhq);
+ sizeof(struct iscsi_uhqe), &p_conn->uhq, NULL);
if (rc)
goto nomem_uhq;
@@ -729,7 +795,7 @@ static int qed_iscsi_allocate_connection(struct qed_hwfn *p_hwfn,
QED_CHAIN_MODE_PBL,
QED_CHAIN_CNT_TYPE_U16,
xhq_num_elements,
- sizeof(struct iscsi_xhqe), &p_conn->xhq);
+ sizeof(struct iscsi_xhqe), &p_conn->xhq, NULL);
if (rc)
goto nomem;
@@ -822,29 +888,32 @@ void qed_iscsi_free_connection(struct qed_hwfn *p_hwfn,
kfree(p_conn);
}
-struct qed_iscsi_info *qed_iscsi_alloc(struct qed_hwfn *p_hwfn)
+int qed_iscsi_alloc(struct qed_hwfn *p_hwfn)
{
struct qed_iscsi_info *p_iscsi_info;
p_iscsi_info = kzalloc(sizeof(*p_iscsi_info), GFP_KERNEL);
if (!p_iscsi_info)
- return NULL;
+ return -ENOMEM;
INIT_LIST_HEAD(&p_iscsi_info->free_list);
- return p_iscsi_info;
+
+ p_hwfn->p_iscsi_info = p_iscsi_info;
+ return 0;
}
-void qed_iscsi_setup(struct qed_hwfn *p_hwfn,
- struct qed_iscsi_info *p_iscsi_info)
+void qed_iscsi_setup(struct qed_hwfn *p_hwfn)
{
- spin_lock_init(&p_iscsi_info->lock);
+ spin_lock_init(&p_hwfn->p_iscsi_info->lock);
}
-void qed_iscsi_free(struct qed_hwfn *p_hwfn,
- struct qed_iscsi_info *p_iscsi_info)
+void qed_iscsi_free(struct qed_hwfn *p_hwfn)
{
struct qed_iscsi_conn *p_conn = NULL;
+ if (!p_hwfn->p_iscsi_info)
+ return;
+
while (!list_empty(&p_hwfn->p_iscsi_info->free_list)) {
p_conn = list_first_entry(&p_hwfn->p_iscsi_info->free_list,
struct qed_iscsi_conn, list_entry);
@@ -854,7 +923,8 @@ void qed_iscsi_free(struct qed_hwfn *p_hwfn,
}
}
- kfree(p_iscsi_info);
+ kfree(p_hwfn->p_iscsi_info);
+ p_hwfn->p_iscsi_info = NULL;
}
static void _qed_iscsi_get_tstats(struct qed_hwfn *p_hwfn,
@@ -1324,6 +1394,23 @@ static int qed_iscsi_stats(struct qed_dev *cdev, struct qed_iscsi_stats *stats)
return qed_iscsi_get_stats(QED_LEADING_HWFN(cdev), stats);
}
+static int qed_iscsi_change_mac(struct qed_dev *cdev,
+ u32 handle, const u8 *mac)
+{
+ struct qed_hash_iscsi_con *hash_con;
+
+ hash_con = qed_iscsi_get_hash(cdev, handle);
+ if (!hash_con) {
+ DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
+ handle);
+ return -EINVAL;
+ }
+
+ return qed_sp_iscsi_mac_update(QED_LEADING_HWFN(cdev),
+ hash_con->con,
+ QED_SPQ_MODE_EBLOCK, NULL);
+}
+
void qed_get_protocol_stats_iscsi(struct qed_dev *cdev,
struct qed_mcp_iscsi_stats *stats)
{
@@ -1358,6 +1445,7 @@ static const struct qed_iscsi_ops qed_iscsi_ops_pass = {
.destroy_conn = &qed_iscsi_destroy_conn,
.clear_sq = &qed_iscsi_clear_conn_sq,
.get_stats = &qed_iscsi_stats,
+ .change_mac = &qed_iscsi_change_mac,
};
const struct qed_iscsi_ops *qed_get_iscsi_ops(void)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.h b/drivers/net/ethernet/qlogic/qed/qed_iscsi.h
index ae98f772cbc0..225c75b02a06 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.h
@@ -57,13 +57,11 @@ extern const struct qed_ll2_ops qed_ll2_ops_pass;
#endif
#if IS_ENABLED(CONFIG_QED_ISCSI)
-struct qed_iscsi_info *qed_iscsi_alloc(struct qed_hwfn *p_hwfn);
+int qed_iscsi_alloc(struct qed_hwfn *p_hwfn);
-void qed_iscsi_setup(struct qed_hwfn *p_hwfn,
- struct qed_iscsi_info *p_iscsi_info);
+void qed_iscsi_setup(struct qed_hwfn *p_hwfn);
-void qed_iscsi_free(struct qed_hwfn *p_hwfn,
- struct qed_iscsi_info *p_iscsi_info);
+void qed_iscsi_free(struct qed_hwfn *p_hwfn);
/**
* @brief - Fills provided statistics struct with statistics.
@@ -74,12 +72,15 @@ void qed_iscsi_free(struct qed_hwfn *p_hwfn,
void qed_get_protocol_stats_iscsi(struct qed_dev *cdev,
struct qed_mcp_iscsi_stats *stats);
#else /* IS_ENABLED(CONFIG_QED_ISCSI) */
-static inline struct qed_iscsi_info *qed_iscsi_alloc(
- struct qed_hwfn *p_hwfn) { return NULL; }
-static inline void qed_iscsi_setup(struct qed_hwfn *p_hwfn,
- struct qed_iscsi_info *p_iscsi_info) {}
-static inline void qed_iscsi_free(struct qed_hwfn *p_hwfn,
- struct qed_iscsi_info *p_iscsi_info) {}
+static inline int qed_iscsi_alloc(struct qed_hwfn *p_hwfn)
+{
+ return -EINVAL;
+}
+
+static inline void qed_iscsi_setup(struct qed_hwfn *p_hwfn) {}
+
+static inline void qed_iscsi_free(struct qed_hwfn *p_hwfn) {}
+
static inline void
qed_get_protocol_stats_iscsi(struct qed_dev *cdev,
struct qed_mcp_iscsi_stats *stats) {}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
new file mode 100644
index 000000000000..b251ebaec4db
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
@@ -0,0 +1,2408 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015-2017 QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/spinlock.h>
+#include <linux/tcp.h>
+#include "qed_cxt.h"
+#include "qed_hw.h"
+#include "qed_ll2.h"
+#include "qed_rdma.h"
+#include "qed_reg_addr.h"
+#include "qed_sp.h"
+
+#define QED_IWARP_ORD_DEFAULT 32
+#define QED_IWARP_IRD_DEFAULT 32
+#define QED_IWARP_MAX_FW_MSS 4120
+
+#define QED_EP_SIG 0xecabcdef
+
+struct mpa_v2_hdr {
+ __be16 ird;
+ __be16 ord;
+};
+
+#define MPA_V2_PEER2PEER_MODEL 0x8000
+#define MPA_V2_SEND_RTR 0x4000 /* on ird */
+#define MPA_V2_READ_RTR 0x4000 /* on ord */
+#define MPA_V2_WRITE_RTR 0x8000
+#define MPA_V2_IRD_ORD_MASK 0x3FFF
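+/* Layout of the 16-bit MPA v2 ird/ord words: bit 15 carries
+ * PEER2PEER (ird) or WRITE_RTR (ord), bit 14 carries SEND_RTR (ird)
+ * or READ_RTR (ord), and the low 14 bits carry the ird/ord value.
+ */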
+
+#define MPA_REV2(_mpa_rev) ((_mpa_rev) == MPA_NEGOTIATION_TYPE_ENHANCED)
+
+#define QED_IWARP_INVALID_TCP_CID 0xffffffff
+#define QED_IWARP_RCV_WND_SIZE_DEF (256 * 1024)
+#define QED_IWARP_RCV_WND_SIZE_MIN (64 * 1024)
+#define TIMESTAMP_HEADER_SIZE (12)
+
+#define QED_IWARP_TS_EN BIT(0)
+#define QED_IWARP_DA_EN BIT(1)
+#define QED_IWARP_PARAM_CRC_NEEDED (1)
+#define QED_IWARP_PARAM_P2P (1)
+
+static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn,
+ u8 fw_event_code, u16 echo,
+ union event_ring_data *data,
+ u8 fw_return_code);
+
+/* Override devinfo with iWARP specific values */
+void qed_iwarp_init_devinfo(struct qed_hwfn *p_hwfn)
+{
+ struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;
+
+ dev->max_inline = IWARP_REQ_MAX_INLINE_DATA_SIZE;
+ dev->max_qp = min_t(u32,
+ IWARP_MAX_QPS,
+ p_hwfn->p_rdma_info->num_qps) -
+ QED_IWARP_PREALLOC_CNT;
+
+ dev->max_cq = dev->max_qp;
+
+ dev->max_qp_resp_rd_atomic_resc = QED_IWARP_IRD_DEFAULT;
+ dev->max_qp_req_rd_atomic_resc = QED_IWARP_ORD_DEFAULT;
+}
+
+void qed_iwarp_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ p_hwfn->rdma_prs_search_reg = PRS_REG_SEARCH_TCP;
+ qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 1);
+ p_hwfn->b_rdma_enabled_in_prs = true;
+}
+
+/* We have two cid maps: one for tcp, which should be used only for passive
+ * syn processing and for replacing a pre-allocated ep in the list; the
+ * second for active tcp and for QPs.
+ */
+static void qed_iwarp_cid_cleaned(struct qed_hwfn *p_hwfn, u32 cid)
+{
+ cid -= qed_cxt_get_proto_cid_start(p_hwfn, p_hwfn->p_rdma_info->proto);
+
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+
+ if (cid < QED_IWARP_PREALLOC_CNT)
+ qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map,
+ cid);
+ else
+ qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid);
+
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+}
+
+static int qed_iwarp_alloc_cid(struct qed_hwfn *p_hwfn, u32 *cid)
+{
+ int rc;
+
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+ rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "Failed in allocating iwarp cid\n");
+ return rc;
+ }
+ *cid += qed_cxt_get_proto_cid_start(p_hwfn, p_hwfn->p_rdma_info->proto);
+
+ rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, *cid);
+ if (rc)
+ qed_iwarp_cid_cleaned(p_hwfn, *cid);
+
+ return rc;
+}
+
+static void qed_iwarp_set_tcp_cid(struct qed_hwfn *p_hwfn, u32 cid)
+{
+ cid -= qed_cxt_get_proto_cid_start(p_hwfn, p_hwfn->p_rdma_info->proto);
+
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+ qed_bmap_set_id(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map, cid);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+}
+
+/* This function allocates a cid for passive tcp (called from syn receive).
+ * The reason it's separate from the regular cid allocation is that these
+ * cids are assured to already have ilt allocated; they are preallocated
+ * to ensure that we won't need to allocate memory during syn processing.
+ */
+static int qed_iwarp_alloc_tcp_cid(struct qed_hwfn *p_hwfn, u32 *cid)
+{
+ int rc;
+
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+
+ rc = qed_rdma_bmap_alloc_id(p_hwfn,
+ &p_hwfn->p_rdma_info->tcp_cid_map, cid);
+
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "can't allocate iwarp tcp cid max-count=%d\n",
+ p_hwfn->p_rdma_info->tcp_cid_map.max_count);
+
+ *cid = QED_IWARP_INVALID_TCP_CID;
+ return rc;
+ }
+
+ *cid += qed_cxt_get_proto_cid_start(p_hwfn,
+ p_hwfn->p_rdma_info->proto);
+ return 0;
+}
+
+int qed_iwarp_create_qp(struct qed_hwfn *p_hwfn,
+ struct qed_rdma_qp *qp,
+ struct qed_rdma_create_qp_out_params *out_params)
+{
+ struct iwarp_create_qp_ramrod_data *p_ramrod;
+ struct qed_sp_init_data init_data;
+ struct qed_spq_entry *p_ent;
+ u16 physical_queue;
+ u32 cid;
+ int rc;
+
+ qp->shared_queue = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ IWARP_SHARED_QUEUE_PAGE_SIZE,
+ &qp->shared_queue_phys_addr,
+ GFP_KERNEL);
+ if (!qp->shared_queue)
+ return -ENOMEM;
+
+ out_params->sq_pbl_virt = (u8 *)qp->shared_queue +
+ IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET;
+ out_params->sq_pbl_phys = qp->shared_queue_phys_addr +
+ IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET;
+ out_params->rq_pbl_virt = (u8 *)qp->shared_queue +
+ IWARP_SHARED_QUEUE_PAGE_RQ_PBL_OFFSET;
+ out_params->rq_pbl_phys = qp->shared_queue_phys_addr +
+ IWARP_SHARED_QUEUE_PAGE_RQ_PBL_OFFSET;
+
+ rc = qed_iwarp_alloc_cid(p_hwfn, &cid);
+ if (rc)
+ goto err1;
+
+ qp->icid = (u16)cid;
+
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.cid = qp->icid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ IWARP_RAMROD_CMD_ID_CREATE_QP,
+ PROTOCOLID_IWARP, &init_data);
+ if (rc)
+ goto err2;
+
+ p_ramrod = &p_ent->ramrod.iwarp_create_qp;
+
+ SET_FIELD(p_ramrod->flags,
+ IWARP_CREATE_QP_RAMROD_DATA_FMR_AND_RESERVED_EN,
+ qp->fmr_and_reserved_lkey);
+
+ SET_FIELD(p_ramrod->flags,
+ IWARP_CREATE_QP_RAMROD_DATA_SIGNALED_COMP, qp->signal_all);
+
+ SET_FIELD(p_ramrod->flags,
+ IWARP_CREATE_QP_RAMROD_DATA_RDMA_RD_EN,
+ qp->incoming_rdma_read_en);
+
+ SET_FIELD(p_ramrod->flags,
+ IWARP_CREATE_QP_RAMROD_DATA_RDMA_WR_EN,
+ qp->incoming_rdma_write_en);
+
+ SET_FIELD(p_ramrod->flags,
+ IWARP_CREATE_QP_RAMROD_DATA_ATOMIC_EN,
+ qp->incoming_atomic_en);
+
+ SET_FIELD(p_ramrod->flags,
+ IWARP_CREATE_QP_RAMROD_DATA_SRQ_FLG, qp->use_srq);
+
+ p_ramrod->pd = qp->pd;
+ p_ramrod->sq_num_pages = qp->sq_num_pages;
+ p_ramrod->rq_num_pages = qp->rq_num_pages;
+
+ p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi);
+ p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo);
+
+ p_ramrod->cq_cid_for_sq =
+ cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | qp->sq_cq_id);
+ p_ramrod->cq_cid_for_rq =
+ cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | qp->rq_cq_id);
+
+ p_ramrod->dpi = cpu_to_le16(qp->dpi);
+
+ physical_queue = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
+ p_ramrod->physical_q0 = cpu_to_le16(physical_queue);
+ physical_queue = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_ACK);
+ p_ramrod->physical_q1 = cpu_to_le16(physical_queue);
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+ if (rc)
+ goto err2;
+
+ return rc;
+
+err2:
+ qed_iwarp_cid_cleaned(p_hwfn, cid);
+err1:
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ IWARP_SHARED_QUEUE_PAGE_SIZE,
+ qp->shared_queue, qp->shared_queue_phys_addr);
+
+ return rc;
+}
+
+static int qed_iwarp_modify_fw(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
+{
+ struct iwarp_modify_qp_ramrod_data *p_ramrod;
+ struct qed_sp_init_data init_data;
+ struct qed_spq_entry *p_ent;
+ int rc;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qp->icid;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ IWARP_RAMROD_CMD_ID_MODIFY_QP,
+ p_hwfn->p_rdma_info->proto, &init_data);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.iwarp_modify_qp;
+ SET_FIELD(p_ramrod->flags, IWARP_MODIFY_QP_RAMROD_DATA_STATE_TRANS_EN,
+ 0x1);
+ if (qp->iwarp_state == QED_IWARP_QP_STATE_CLOSING)
+ p_ramrod->transition_to_state = IWARP_MODIFY_QP_STATE_CLOSING;
+ else
+ p_ramrod->transition_to_state = IWARP_MODIFY_QP_STATE_ERROR;
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x)rc=%d\n", qp->icid, rc);
+
+ return rc;
+}
+
+enum qed_iwarp_qp_state qed_roce2iwarp_state(enum qed_roce_qp_state state)
+{
+ switch (state) {
+ case QED_ROCE_QP_STATE_RESET:
+ case QED_ROCE_QP_STATE_INIT:
+ case QED_ROCE_QP_STATE_RTR:
+ return QED_IWARP_QP_STATE_IDLE;
+ case QED_ROCE_QP_STATE_RTS:
+ return QED_IWARP_QP_STATE_RTS;
+ case QED_ROCE_QP_STATE_SQD:
+ return QED_IWARP_QP_STATE_CLOSING;
+ case QED_ROCE_QP_STATE_ERR:
+ return QED_IWARP_QP_STATE_ERROR;
+ case QED_ROCE_QP_STATE_SQE:
+ return QED_IWARP_QP_STATE_TERMINATE;
+ default:
+ return QED_IWARP_QP_STATE_ERROR;
+ }
+}
+
+static enum qed_roce_qp_state
+qed_iwarp2roce_state(enum qed_iwarp_qp_state state)
+{
+ switch (state) {
+ case QED_IWARP_QP_STATE_IDLE:
+ return QED_ROCE_QP_STATE_INIT;
+ case QED_IWARP_QP_STATE_RTS:
+ return QED_ROCE_QP_STATE_RTS;
+ case QED_IWARP_QP_STATE_TERMINATE:
+ return QED_ROCE_QP_STATE_SQE;
+ case QED_IWARP_QP_STATE_CLOSING:
+ return QED_ROCE_QP_STATE_SQD;
+ case QED_IWARP_QP_STATE_ERROR:
+ return QED_ROCE_QP_STATE_ERR;
+ default:
+ return QED_ROCE_QP_STATE_ERR;
+ }
+}
+
+const char *iwarp_state_names[] = {
+ "IDLE",
+ "RTS",
+ "TERMINATE",
+ "CLOSING",
+ "ERROR",
+};
+
+int
+qed_iwarp_modify_qp(struct qed_hwfn *p_hwfn,
+ struct qed_rdma_qp *qp,
+ enum qed_iwarp_qp_state new_state, bool internal)
+{
+ enum qed_iwarp_qp_state prev_iw_state;
+ bool modify_fw = false;
+ int rc = 0;
+
+ /* Modify QP can be called from the upper layer or as a result of an
+ * async RST/FIN; therefore we need to protect it.
+ */
+ spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.qp_lock);
+ prev_iw_state = qp->iwarp_state;
+
+ if (prev_iw_state == new_state) {
+ spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.qp_lock);
+ return 0;
+ }
+
+ switch (prev_iw_state) {
+ case QED_IWARP_QP_STATE_IDLE:
+ switch (new_state) {
+ case QED_IWARP_QP_STATE_RTS:
+ qp->iwarp_state = QED_IWARP_QP_STATE_RTS;
+ break;
+ case QED_IWARP_QP_STATE_ERROR:
+ qp->iwarp_state = QED_IWARP_QP_STATE_ERROR;
+ if (!internal)
+ modify_fw = true;
+ break;
+ default:
+ break;
+ }
+ break;
+ case QED_IWARP_QP_STATE_RTS:
+ switch (new_state) {
+ case QED_IWARP_QP_STATE_CLOSING:
+ if (!internal)
+ modify_fw = true;
+
+ qp->iwarp_state = QED_IWARP_QP_STATE_CLOSING;
+ break;
+ case QED_IWARP_QP_STATE_ERROR:
+ if (!internal)
+ modify_fw = true;
+ qp->iwarp_state = QED_IWARP_QP_STATE_ERROR;
+ break;
+ default:
+ break;
+ }
+ break;
+ case QED_IWARP_QP_STATE_ERROR:
+ switch (new_state) {
+ case QED_IWARP_QP_STATE_IDLE:
+
+ qp->iwarp_state = new_state;
+ break;
+ case QED_IWARP_QP_STATE_CLOSING:
+ /* Could happen due to a race... do nothing. */
+ break;
+ default:
+ rc = -EINVAL;
+ }
+ break;
+ case QED_IWARP_QP_STATE_TERMINATE:
+ case QED_IWARP_QP_STATE_CLOSING:
+ qp->iwarp_state = new_state;
+ break;
+ default:
+ break;
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) %s --> %s%s\n",
+ qp->icid,
+ iwarp_state_names[prev_iw_state],
+ iwarp_state_names[qp->iwarp_state],
+ internal ? "internal" : "");
+
+ spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.qp_lock);
+
+ if (modify_fw)
+ rc = qed_iwarp_modify_fw(p_hwfn, qp);
+
+ return rc;
+}
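+/* A condensed reading of the transition logic above:
+ * IDLE -> RTS | ERROR; RTS -> CLOSING | ERROR;
+ * ERROR -> IDLE (or a harmless CLOSING race);
+ * TERMINATE/CLOSING accept any requested state.
+ * Firmware is only told about externally driven (!internal)
+ * transitions to CLOSING/ERROR, via qed_iwarp_modify_fw().
+ */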
+
+int qed_iwarp_fw_destroy(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
+{
+ struct qed_sp_init_data init_data;
+ struct qed_spq_entry *p_ent;
+ int rc;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qp->icid;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ IWARP_RAMROD_CMD_ID_DESTROY_QP,
+ p_hwfn->p_rdma_info->proto, &init_data);
+ if (rc)
+ return rc;
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) rc = %d\n", qp->icid, rc);
+
+ return rc;
+}
+
+static void qed_iwarp_destroy_ep(struct qed_hwfn *p_hwfn,
+ struct qed_iwarp_ep *ep,
+ bool remove_from_active_list)
+{
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(*ep->ep_buffer_virt),
+ ep->ep_buffer_virt, ep->ep_buffer_phys);
+
+ if (remove_from_active_list) {
+ spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
+ list_del(&ep->list_entry);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
+ }
+
+ if (ep->qp)
+ ep->qp->ep = NULL;
+
+ kfree(ep);
+}
+
+int qed_iwarp_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
+{
+ struct qed_iwarp_ep *ep = qp->ep;
+ int wait_count = 0;
+ int rc = 0;
+
+ if (qp->iwarp_state != QED_IWARP_QP_STATE_ERROR) {
+ rc = qed_iwarp_modify_qp(p_hwfn, qp,
+ QED_IWARP_QP_STATE_ERROR, false);
+ if (rc)
+ return rc;
+ }
+
+ /* Make sure ep is closed before returning and freeing memory. */
+ if (ep) {
+ while (ep->state != QED_IWARP_EP_CLOSED && wait_count++ < 200)
+ msleep(100);
+
+ if (ep->state != QED_IWARP_EP_CLOSED)
+ DP_NOTICE(p_hwfn, "ep state close timeout state=%x\n",
+ ep->state);
+
+ qed_iwarp_destroy_ep(p_hwfn, ep, false);
+ }
+
+ rc = qed_iwarp_fw_destroy(p_hwfn, qp);
+
+ if (qp->shared_queue)
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ IWARP_SHARED_QUEUE_PAGE_SIZE,
+ qp->shared_queue, qp->shared_queue_phys_addr);
+
+ return rc;
+}
+
+static int
+qed_iwarp_create_ep(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep **ep_out)
+{
+ struct qed_iwarp_ep *ep;
+ int rc;
+
+ ep = kzalloc(sizeof(*ep), GFP_KERNEL);
+ if (!ep)
+ return -ENOMEM;
+
+ ep->state = QED_IWARP_EP_INIT;
+
+ ep->ep_buffer_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(*ep->ep_buffer_virt),
+ &ep->ep_buffer_phys,
+ GFP_KERNEL);
+ if (!ep->ep_buffer_virt) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ ep->sig = QED_EP_SIG;
+
+ *ep_out = ep;
+
+ return 0;
+
+err:
+ kfree(ep);
+ return rc;
+}
+
+static void
+qed_iwarp_print_tcp_ramrod(struct qed_hwfn *p_hwfn,
+ struct iwarp_tcp_offload_ramrod_data *p_tcp_ramrod)
+{
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "local_mac=%x %x %x, remote_mac=%x %x %x\n",
+ p_tcp_ramrod->tcp.local_mac_addr_lo,
+ p_tcp_ramrod->tcp.local_mac_addr_mid,
+ p_tcp_ramrod->tcp.local_mac_addr_hi,
+ p_tcp_ramrod->tcp.remote_mac_addr_lo,
+ p_tcp_ramrod->tcp.remote_mac_addr_mid,
+ p_tcp_ramrod->tcp.remote_mac_addr_hi);
+
+ if (p_tcp_ramrod->tcp.ip_version == TCP_IPV4) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "local_ip=%pI4h:%x, remote_ip=%pI4h%x, vlan=%x\n",
+ p_tcp_ramrod->tcp.local_ip,
+ p_tcp_ramrod->tcp.local_port,
+ p_tcp_ramrod->tcp.remote_ip,
+ p_tcp_ramrod->tcp.remote_port,
+ p_tcp_ramrod->tcp.vlan_id);
+ } else {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "local_ip=%pI6h:%x, remote_ip=%pI6h:%x, vlan=%x\n",
+ p_tcp_ramrod->tcp.local_ip,
+ p_tcp_ramrod->tcp.local_port,
+ p_tcp_ramrod->tcp.remote_ip,
+ p_tcp_ramrod->tcp.remote_port,
+ p_tcp_ramrod->tcp.vlan_id);
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "flow_label=%x, ttl=%x, tos_or_tc=%x, mss=%x, rcv_wnd_scale=%x, connect_mode=%x, flags=%x\n",
+ p_tcp_ramrod->tcp.flow_label,
+ p_tcp_ramrod->tcp.ttl,
+ p_tcp_ramrod->tcp.tos_or_tc,
+ p_tcp_ramrod->tcp.mss,
+ p_tcp_ramrod->tcp.rcv_wnd_scale,
+ p_tcp_ramrod->tcp.connect_mode,
+ p_tcp_ramrod->tcp.flags);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "syn_ip_payload_length=%x, lo=%x, hi=%x\n",
+ p_tcp_ramrod->tcp.syn_ip_payload_length,
+ p_tcp_ramrod->tcp.syn_phy_addr_lo,
+ p_tcp_ramrod->tcp.syn_phy_addr_hi);
+}
+
+static int
+qed_iwarp_tcp_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
+{
+ struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
+ struct iwarp_tcp_offload_ramrod_data *p_tcp_ramrod;
+ struct tcp_offload_params_opt2 *tcp;
+ struct qed_sp_init_data init_data;
+ struct qed_spq_entry *p_ent;
+ dma_addr_t async_output_phys;
+ dma_addr_t in_pdata_phys;
+ u16 physical_q;
+ u8 tcp_flags;
+ int rc;
+ int i;
+
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = ep->tcp_cid;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
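+ /* Passive offload is triggered from SYN processing and can't
+ * block, so complete via callback; active connects may block.
+ */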
+ if (ep->connect_mode == TCP_CONNECT_PASSIVE)
+ init_data.comp_mode = QED_SPQ_MODE_CB;
+ else
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ IWARP_RAMROD_CMD_ID_TCP_OFFLOAD,
+ PROTOCOLID_IWARP, &init_data);
+ if (rc)
+ return rc;
+
+ p_tcp_ramrod = &p_ent->ramrod.iwarp_tcp_offload;
+
+ in_pdata_phys = ep->ep_buffer_phys +
+ offsetof(struct qed_iwarp_ep_memory, in_pdata);
+ DMA_REGPAIR_LE(p_tcp_ramrod->iwarp.incoming_ulp_buffer.addr,
+ in_pdata_phys);
+
+ p_tcp_ramrod->iwarp.incoming_ulp_buffer.len =
+ cpu_to_le16(sizeof(ep->ep_buffer_virt->in_pdata));
+
+ async_output_phys = ep->ep_buffer_phys +
+ offsetof(struct qed_iwarp_ep_memory, async_output);
+ DMA_REGPAIR_LE(p_tcp_ramrod->iwarp.async_eqe_output_buf,
+ async_output_phys);
+
+ p_tcp_ramrod->iwarp.handle_for_async.hi = cpu_to_le32(PTR_HI(ep));
+ p_tcp_ramrod->iwarp.handle_for_async.lo = cpu_to_le32(PTR_LO(ep));
+
+ physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
+ p_tcp_ramrod->iwarp.physical_q0 = cpu_to_le16(physical_q);
+ physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_ACK);
+ p_tcp_ramrod->iwarp.physical_q1 = cpu_to_le16(physical_q);
+ p_tcp_ramrod->iwarp.mpa_mode = iwarp_info->mpa_rev;
+
+ tcp = &p_tcp_ramrod->tcp;
+ qed_set_fw_mac_addr(&tcp->remote_mac_addr_hi,
+ &tcp->remote_mac_addr_mid,
+ &tcp->remote_mac_addr_lo, ep->remote_mac_addr);
+ qed_set_fw_mac_addr(&tcp->local_mac_addr_hi, &tcp->local_mac_addr_mid,
+ &tcp->local_mac_addr_lo, ep->local_mac_addr);
+
+ tcp->vlan_id = cpu_to_le16(ep->cm_info.vlan);
+
+ tcp_flags = p_hwfn->p_rdma_info->iwarp.tcp_flags;
+ tcp->flags = 0;
+ SET_FIELD(tcp->flags, TCP_OFFLOAD_PARAMS_OPT2_TS_EN,
+ !!(tcp_flags & QED_IWARP_TS_EN));
+
+ SET_FIELD(tcp->flags, TCP_OFFLOAD_PARAMS_OPT2_DA_EN,
+ !!(tcp_flags & QED_IWARP_DA_EN));
+
+ tcp->ip_version = ep->cm_info.ip_version;
+
+ for (i = 0; i < 4; i++) {
+ tcp->remote_ip[i] = cpu_to_le32(ep->cm_info.remote_ip[i]);
+ tcp->local_ip[i] = cpu_to_le32(ep->cm_info.local_ip[i]);
+ }
+
+ tcp->remote_port = cpu_to_le16(ep->cm_info.remote_port);
+ tcp->local_port = cpu_to_le16(ep->cm_info.local_port);
+ tcp->mss = cpu_to_le16(ep->mss);
+ tcp->flow_label = 0;
+ tcp->ttl = 0x40;
+ tcp->tos_or_tc = 0;
+
+ tcp->rcv_wnd_scale = (u8)p_hwfn->p_rdma_info->iwarp.rcv_wnd_scale;
+ tcp->connect_mode = ep->connect_mode;
+
+ if (ep->connect_mode == TCP_CONNECT_PASSIVE) {
+ tcp->syn_ip_payload_length =
+ cpu_to_le16(ep->syn_ip_payload_length);
+ tcp->syn_phy_addr_hi = DMA_HI_LE(ep->syn_phy_addr);
+ tcp->syn_phy_addr_lo = DMA_LO_LE(ep->syn_phy_addr);
+ }
+
+ qed_iwarp_print_tcp_ramrod(p_hwfn, p_tcp_ramrod);
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "EP(0x%x) Offload completed rc=%d\n", ep->tcp_cid, rc);
+
+ return rc;
+}
+
+static void
+qed_iwarp_mpa_received(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
+{
+ struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
+ struct qed_iwarp_cm_event_params params;
+ struct mpa_v2_hdr *mpa_v2;
+ union async_output *async_data;
+ u16 mpa_ord, mpa_ird;
+ u8 mpa_hdr_size = 0;
+ u8 mpa_rev;
+
+ async_data = &ep->ep_buffer_virt->async_output;
+
+ mpa_rev = async_data->mpa_request.mpa_handshake_mode;
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "private_data_len=%x handshake_mode=%x private_data=(%x)\n",
+ async_data->mpa_request.ulp_data_len,
+ mpa_rev, *((u32 *)(ep->ep_buffer_virt->in_pdata)));
+
+ if (mpa_rev == MPA_NEGOTIATION_TYPE_ENHANCED) {
+ /* Read ord/ird values from private data buffer */
+ mpa_v2 = (struct mpa_v2_hdr *)ep->ep_buffer_virt->in_pdata;
+ mpa_hdr_size = sizeof(*mpa_v2);
+
+ mpa_ord = ntohs(mpa_v2->ord);
+ mpa_ird = ntohs(mpa_v2->ird);
+
+ /* Temporarily store in cm_info the incoming ord/ird requested;
+ * later, replace with the negotiated value during accept.
+ */
+ ep->cm_info.ord = (u8)min_t(u16,
+ (mpa_ord & MPA_V2_IRD_ORD_MASK),
+ QED_IWARP_ORD_DEFAULT);
+
+ ep->cm_info.ird = (u8)min_t(u16,
+ (mpa_ird & MPA_V2_IRD_ORD_MASK),
+ QED_IWARP_IRD_DEFAULT);
+
+ /* Peer2Peer negotiation */
+ ep->rtr_type = MPA_RTR_TYPE_NONE;
+ if (mpa_ird & MPA_V2_PEER2PEER_MODEL) {
+ if (mpa_ord & MPA_V2_WRITE_RTR)
+ ep->rtr_type |= MPA_RTR_TYPE_ZERO_WRITE;
+
+ if (mpa_ord & MPA_V2_READ_RTR)
+ ep->rtr_type |= MPA_RTR_TYPE_ZERO_READ;
+
+ if (mpa_ird & MPA_V2_SEND_RTR)
+ ep->rtr_type |= MPA_RTR_TYPE_ZERO_SEND;
+
+ ep->rtr_type &= iwarp_info->rtr_type;
+
+ /* if we're left with no match, send our capabilities */
+ if (ep->rtr_type == MPA_RTR_TYPE_NONE)
+ ep->rtr_type = iwarp_info->rtr_type;
+ }
+
+ ep->mpa_rev = MPA_NEGOTIATION_TYPE_ENHANCED;
+ } else {
+ ep->cm_info.ord = QED_IWARP_ORD_DEFAULT;
+ ep->cm_info.ird = QED_IWARP_IRD_DEFAULT;
+ ep->mpa_rev = MPA_NEGOTIATION_TYPE_BASIC;
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "MPA_NEGOTIATE (v%d): ORD: 0x%x IRD: 0x%x rtr:0x%x ulp_data_len = %x mpa_hdr_size = %x\n",
+ mpa_rev, ep->cm_info.ord, ep->cm_info.ird, ep->rtr_type,
+ async_data->mpa_request.ulp_data_len, mpa_hdr_size);
+
+ /* Strip mpa v2 hdr from private data before sending to upper layer */
+ ep->cm_info.private_data = ep->ep_buffer_virt->in_pdata + mpa_hdr_size;
+
+ ep->cm_info.private_data_len = async_data->mpa_request.ulp_data_len -
+ mpa_hdr_size;
+
+ params.event = QED_IWARP_EVENT_MPA_REQUEST;
+ params.cm_info = &ep->cm_info;
+ params.ep_context = ep;
+ params.status = 0;
+
+ ep->state = QED_IWARP_EP_MPA_REQ_RCVD;
+ ep->event_cb(ep->cb_context, &params);
+}
+
+static int
+qed_iwarp_mpa_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
+{
+ struct iwarp_mpa_offload_ramrod_data *p_mpa_ramrod;
+ struct qed_sp_init_data init_data;
+ dma_addr_t async_output_phys;
+ struct qed_spq_entry *p_ent;
+ dma_addr_t out_pdata_phys;
+ dma_addr_t in_pdata_phys;
+ struct qed_rdma_qp *qp;
+ bool reject;
+ int rc;
+
+ if (!ep)
+ return -EINVAL;
+
+ qp = ep->qp;
+ reject = !qp;
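+ /* An ep with no QP attached means this MPA offload is a reject. */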
+
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = reject ? ep->tcp_cid : qp->icid;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+
+ if (ep->connect_mode == TCP_CONNECT_ACTIVE)
+ init_data.comp_mode = QED_SPQ_MODE_CB;
+ else
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ IWARP_RAMROD_CMD_ID_MPA_OFFLOAD,
+ PROTOCOLID_IWARP, &init_data);
+ if (rc)
+ return rc;
+
+ p_mpa_ramrod = &p_ent->ramrod.iwarp_mpa_offload;
+ out_pdata_phys = ep->ep_buffer_phys +
+ offsetof(struct qed_iwarp_ep_memory, out_pdata);
+ DMA_REGPAIR_LE(p_mpa_ramrod->common.outgoing_ulp_buffer.addr,
+ out_pdata_phys);
+ p_mpa_ramrod->common.outgoing_ulp_buffer.len =
+ ep->cm_info.private_data_len;
+ p_mpa_ramrod->common.crc_needed = p_hwfn->p_rdma_info->iwarp.crc_needed;
+
+ p_mpa_ramrod->common.out_rq.ord = ep->cm_info.ord;
+ p_mpa_ramrod->common.out_rq.ird = ep->cm_info.ird;
+
+ p_mpa_ramrod->tcp_cid = p_hwfn->hw_info.opaque_fid << 16 | ep->tcp_cid;
+
+ in_pdata_phys = ep->ep_buffer_phys +
+ offsetof(struct qed_iwarp_ep_memory, in_pdata);
+ p_mpa_ramrod->tcp_connect_side = ep->connect_mode;
+ DMA_REGPAIR_LE(p_mpa_ramrod->incoming_ulp_buffer.addr,
+ in_pdata_phys);
+ p_mpa_ramrod->incoming_ulp_buffer.len =
+ cpu_to_le16(sizeof(ep->ep_buffer_virt->in_pdata));
+ async_output_phys = ep->ep_buffer_phys +
+ offsetof(struct qed_iwarp_ep_memory, async_output);
+ DMA_REGPAIR_LE(p_mpa_ramrod->async_eqe_output_buf,
+ async_output_phys);
+ p_mpa_ramrod->handle_for_async.hi = cpu_to_le32(PTR_HI(ep));
+ p_mpa_ramrod->handle_for_async.lo = cpu_to_le32(PTR_LO(ep));
+
+ if (!reject) {
+ DMA_REGPAIR_LE(p_mpa_ramrod->shared_queue_addr,
+ qp->shared_queue_phys_addr);
+ p_mpa_ramrod->stats_counter_id =
+ RESC_START(p_hwfn, QED_RDMA_STATS_QUEUE) + qp->stats_queue;
+ } else {
+ p_mpa_ramrod->common.reject = 1;
+ }
+
+ p_mpa_ramrod->mode = ep->mpa_rev;
+ SET_FIELD(p_mpa_ramrod->rtr_pref,
+ IWARP_MPA_OFFLOAD_RAMROD_DATA_RTR_SUPPORTED, ep->rtr_type);
+
+ ep->state = QED_IWARP_EP_MPA_OFFLOADED;
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+ if (!reject)
+ ep->cid = qp->icid; /* Now they're migrated. */
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_RDMA,
+ "QP(0x%x) EP(0x%x) MPA Offload rc = %d IRD=0x%x ORD=0x%x rtr_type=%d mpa_rev=%d reject=%d\n",
+ reject ? 0xffff : qp->icid,
+ ep->tcp_cid,
+ rc,
+ ep->cm_info.ird,
+ ep->cm_info.ord, ep->rtr_type, ep->mpa_rev, reject);
+ return rc;
+}
+
+static void
+qed_iwarp_return_ep(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
+{
+ ep->state = QED_IWARP_EP_INIT;
+ if (ep->qp)
+ ep->qp->ep = NULL;
+ ep->qp = NULL;
+ memset(&ep->cm_info, 0, sizeof(ep->cm_info));
+
+ if (ep->tcp_cid == QED_IWARP_INVALID_TCP_CID) {
+ /* We don't care about the return code; it's ok if tcp_cid
+ * remains invalid... in this case we'll defer allocation.
+ */
+ qed_iwarp_alloc_tcp_cid(p_hwfn, &ep->tcp_cid);
+ }
+ spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
+
+ list_del(&ep->list_entry);
+ list_add_tail(&ep->list_entry,
+ &p_hwfn->p_rdma_info->iwarp.ep_free_list);
+
+ spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
+}
+
+void
+qed_iwarp_parse_private_data(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
+{
+ struct mpa_v2_hdr *mpa_v2_params;
+ union async_output *async_data;
+ u16 mpa_ird, mpa_ord;
+ u8 mpa_data_size = 0;
+
+ if (MPA_REV2(p_hwfn->p_rdma_info->iwarp.mpa_rev)) {
+ mpa_v2_params =
+ (struct mpa_v2_hdr *)(ep->ep_buffer_virt->in_pdata);
+ mpa_data_size = sizeof(*mpa_v2_params);
+ mpa_ird = ntohs(mpa_v2_params->ird);
+ mpa_ord = ntohs(mpa_v2_params->ord);
+
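+ /* Note the cross-over: the peer's advertised ord bounds our ird
+ * and its ird bounds our ord.
+ */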
+ ep->cm_info.ird = (u8)(mpa_ord & MPA_V2_IRD_ORD_MASK);
+ ep->cm_info.ord = (u8)(mpa_ird & MPA_V2_IRD_ORD_MASK);
+ }
+ async_data = &ep->ep_buffer_virt->async_output;
+
+ ep->cm_info.private_data = ep->ep_buffer_virt->in_pdata + mpa_data_size;
+ ep->cm_info.private_data_len = async_data->mpa_response.ulp_data_len -
+ mpa_data_size;
+}
+
+void
+qed_iwarp_mpa_reply_arrived(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
+{
+ struct qed_iwarp_cm_event_params params;
+
+ if (ep->connect_mode == TCP_CONNECT_PASSIVE) {
+ DP_NOTICE(p_hwfn,
+ "MPA reply event not expected on passive side!\n");
+ return;
+ }
+
+ params.event = QED_IWARP_EVENT_ACTIVE_MPA_REPLY;
+
+ qed_iwarp_parse_private_data(p_hwfn, ep);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "MPA_NEGOTIATE (v%d): ORD: 0x%x IRD: 0x%x\n",
+ ep->mpa_rev, ep->cm_info.ord, ep->cm_info.ird);
+
+ params.cm_info = &ep->cm_info;
+ params.ep_context = ep;
+ params.status = 0;
+
+ ep->mpa_reply_processed = true;
+
+ ep->event_cb(ep->cb_context, &params);
+}
+
+#define QED_IWARP_CONNECT_MODE_STRING(ep) \
+ ((ep)->connect_mode == TCP_CONNECT_PASSIVE) ? "Passive" : "Active"
+
+/* Called as a result of the event:
+ * IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_COMPLETE
+ */
+static void
+qed_iwarp_mpa_complete(struct qed_hwfn *p_hwfn,
+ struct qed_iwarp_ep *ep, u8 fw_return_code)
+{
+ struct qed_iwarp_cm_event_params params;
+
+ if (ep->connect_mode == TCP_CONNECT_ACTIVE)
+ params.event = QED_IWARP_EVENT_ACTIVE_COMPLETE;
+ else
+ params.event = QED_IWARP_EVENT_PASSIVE_COMPLETE;
+
+ if (ep->connect_mode == TCP_CONNECT_ACTIVE && !ep->mpa_reply_processed)
+ qed_iwarp_parse_private_data(p_hwfn, ep);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "MPA_NEGOTIATE (v%d): ORD: 0x%x IRD: 0x%x\n",
+ ep->mpa_rev, ep->cm_info.ord, ep->cm_info.ird);
+
+ params.cm_info = &ep->cm_info;
+
+ params.ep_context = ep;
+
+ ep->state = QED_IWARP_EP_CLOSED;
+
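+ /* Assume the connection ends up closed unless the FW reported
+ * success, and translate the FW return code into an errno for
+ * the upper layer.
+ */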
+ switch (fw_return_code) {
+ case RDMA_RETURN_OK:
+ ep->qp->max_rd_atomic_req = ep->cm_info.ord;
+ ep->qp->max_rd_atomic_resp = ep->cm_info.ird;
+ qed_iwarp_modify_qp(p_hwfn, ep->qp, QED_IWARP_QP_STATE_RTS, 1);
+ ep->state = QED_IWARP_EP_ESTABLISHED;
+ params.status = 0;
+ break;
+ case IWARP_CONN_ERROR_MPA_TIMEOUT:
+ DP_NOTICE(p_hwfn, "%s(0x%x) MPA timeout\n",
+ QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
+ params.status = -EBUSY;
+ break;
+ case IWARP_CONN_ERROR_MPA_ERROR_REJECT:
+ DP_NOTICE(p_hwfn, "%s(0x%x) MPA Reject\n",
+ QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
+ params.status = -ECONNREFUSED;
+ break;
+ case IWARP_CONN_ERROR_MPA_RST:
+ DP_NOTICE(p_hwfn, "%s(0x%x) MPA reset(tcp cid: 0x%x)\n",
+ QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid,
+ ep->tcp_cid);
+ params.status = -ECONNRESET;
+ break;
+ case IWARP_CONN_ERROR_MPA_FIN:
+ DP_NOTICE(p_hwfn, "%s(0x%x) MPA received FIN\n",
+ QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
+ params.status = -ECONNREFUSED;
+ break;
+ case IWARP_CONN_ERROR_MPA_INSUF_IRD:
+ DP_NOTICE(p_hwfn, "%s(0x%x) MPA insufficient ird\n",
+ QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
+ params.status = -ECONNREFUSED;
+ break;
+ case IWARP_CONN_ERROR_MPA_RTR_MISMATCH:
+ DP_NOTICE(p_hwfn, "%s(0x%x) MPA RTR MISMATCH\n",
+ QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
+ params.status = -ECONNREFUSED;
+ break;
+ case IWARP_CONN_ERROR_MPA_INVALID_PACKET:
+ DP_NOTICE(p_hwfn, "%s(0x%x) MPA Invalid Packet\n",
+ QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
+ params.status = -ECONNREFUSED;
+ break;
+ case IWARP_CONN_ERROR_MPA_LOCAL_ERROR:
+ DP_NOTICE(p_hwfn, "%s(0x%x) MPA Local Error\n",
+ QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
+ params.status = -ECONNREFUSED;
+ break;
+ case IWARP_CONN_ERROR_MPA_TERMINATE:
+ DP_NOTICE(p_hwfn, "%s(0x%x) MPA TERMINATE\n",
+ QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
+ params.status = -ECONNREFUSED;
+ break;
+ default:
+ params.status = -ECONNRESET;
+ break;
+ }
+
+ ep->event_cb(ep->cb_context, &params);
+
+ /* On the passive side, if there is no associated QP (REJECT) we need
+ * to return the ep to the pool (in the regular case we add an element
+ * in accept instead of this one).
+ * In both cases we need to remove it from the ep_list.
+ */
+ if (fw_return_code != RDMA_RETURN_OK) {
+ ep->tcp_cid = QED_IWARP_INVALID_TCP_CID;
+ if ((ep->connect_mode == TCP_CONNECT_PASSIVE) &&
+ (!ep->qp)) { /* Rejected */
+ qed_iwarp_return_ep(p_hwfn, ep);
+ } else {
+ spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
+ list_del(&ep->list_entry);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
+ }
+ }
+}
+
+static void
+qed_iwarp_mpa_v2_set_private(struct qed_hwfn *p_hwfn,
+ struct qed_iwarp_ep *ep, u8 *mpa_data_size)
+{
+ struct mpa_v2_hdr *mpa_v2_params;
+ u16 mpa_ird, mpa_ord;
+
+ *mpa_data_size = 0;
+ if (MPA_REV2(ep->mpa_rev)) {
+ mpa_v2_params =
+ (struct mpa_v2_hdr *)ep->ep_buffer_virt->out_pdata;
+ *mpa_data_size = sizeof(*mpa_v2_params);
+
+ mpa_ird = (u16)ep->cm_info.ird;
+ mpa_ord = (u16)ep->cm_info.ord;
+
+ if (ep->rtr_type != MPA_RTR_TYPE_NONE) {
+ mpa_ird |= MPA_V2_PEER2PEER_MODEL;
+
+ if (ep->rtr_type & MPA_RTR_TYPE_ZERO_SEND)
+ mpa_ird |= MPA_V2_SEND_RTR;
+
+ if (ep->rtr_type & MPA_RTR_TYPE_ZERO_WRITE)
+ mpa_ord |= MPA_V2_WRITE_RTR;
+
+ if (ep->rtr_type & MPA_RTR_TYPE_ZERO_READ)
+ mpa_ord |= MPA_V2_READ_RTR;
+ }
+
+ mpa_v2_params->ird = htons(mpa_ird);
+ mpa_v2_params->ord = htons(mpa_ord);
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_RDMA,
+ "MPA_NEGOTIATE Header: [%x ord:%x ird] %x ord:%x ird:%x peer2peer:%x rtr_send:%x rtr_write:%x rtr_read:%x\n",
+ mpa_v2_params->ird,
+ mpa_v2_params->ord,
+ *((u32 *)mpa_v2_params),
+ mpa_ord & MPA_V2_IRD_ORD_MASK,
+ mpa_ird & MPA_V2_IRD_ORD_MASK,
+ !!(mpa_ird & MPA_V2_PEER2PEER_MODEL),
+ !!(mpa_ird & MPA_V2_SEND_RTR),
+ !!(mpa_ord & MPA_V2_WRITE_RTR),
+ !!(mpa_ord & MPA_V2_READ_RTR));
+ }
+}
+
+int qed_iwarp_connect(void *rdma_cxt,
+ struct qed_iwarp_connect_in *iparams,
+ struct qed_iwarp_connect_out *oparams)
+{
+ struct qed_hwfn *p_hwfn = rdma_cxt;
+ struct qed_iwarp_info *iwarp_info;
+ struct qed_iwarp_ep *ep;
+ u8 mpa_data_size = 0;
+ u8 ts_hdr_size = 0;
+ u32 cid;
+ int rc;
+
+ if ((iparams->cm_info.ord > QED_IWARP_ORD_DEFAULT) ||
+ (iparams->cm_info.ird > QED_IWARP_IRD_DEFAULT)) {
+ DP_NOTICE(p_hwfn,
+ "QP(0x%x) ERROR: Invalid ord(0x%x)/ird(0x%x)\n",
+ iparams->qp->icid, iparams->cm_info.ord,
+ iparams->cm_info.ird);
+
+ return -EINVAL;
+ }
+
+ iwarp_info = &p_hwfn->p_rdma_info->iwarp;
+
+ /* Allocate ep object */
+ rc = qed_iwarp_alloc_cid(p_hwfn, &cid);
+ if (rc)
+ return rc;
+
+ rc = qed_iwarp_create_ep(p_hwfn, &ep);
+ if (rc)
+ goto err;
+
+ ep->tcp_cid = cid;
+
+ spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
+ list_add_tail(&ep->list_entry, &p_hwfn->p_rdma_info->iwarp.ep_list);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
+
+ ep->qp = iparams->qp;
+ ep->qp->ep = ep;
+ ether_addr_copy(ep->remote_mac_addr, iparams->remote_mac_addr);
+ ether_addr_copy(ep->local_mac_addr, iparams->local_mac_addr);
+ memcpy(&ep->cm_info, &iparams->cm_info, sizeof(ep->cm_info));
+
+ ep->cm_info.ord = iparams->cm_info.ord;
+ ep->cm_info.ird = iparams->cm_info.ird;
+
+ ep->rtr_type = iwarp_info->rtr_type;
+ if (!iwarp_info->peer2peer)
+ ep->rtr_type = MPA_RTR_TYPE_NONE;
+
+ if ((ep->rtr_type & MPA_RTR_TYPE_ZERO_READ) && (ep->cm_info.ord == 0))
+ ep->cm_info.ord = 1;
+
+ ep->mpa_rev = iwarp_info->mpa_rev;
+
+ qed_iwarp_mpa_v2_set_private(p_hwfn, ep, &mpa_data_size);
+
+ ep->cm_info.private_data = ep->ep_buffer_virt->out_pdata;
+ ep->cm_info.private_data_len = iparams->cm_info.private_data_len +
+ mpa_data_size;
+
+ memcpy((u8 *)ep->ep_buffer_virt->out_pdata + mpa_data_size,
+ iparams->cm_info.private_data,
+ iparams->cm_info.private_data_len);
+
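+ /* TCP timestamps consume 12 bytes of every segment; shrink the
+ * advertised MSS accordingly and clamp it to what the FW supports.
+ */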
+ if (p_hwfn->p_rdma_info->iwarp.tcp_flags & QED_IWARP_TS_EN)
+ ts_hdr_size = TIMESTAMP_HEADER_SIZE;
+
+ ep->mss = iparams->mss - ts_hdr_size;
+ ep->mss = min_t(u16, QED_IWARP_MAX_FW_MSS, ep->mss);
+
+ ep->event_cb = iparams->event_cb;
+ ep->cb_context = iparams->cb_context;
+ ep->connect_mode = TCP_CONNECT_ACTIVE;
+
+ oparams->ep_context = ep;
+
+ rc = qed_iwarp_tcp_offload(p_hwfn, ep);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) EP(0x%x) rc = %d\n",
+ iparams->qp->icid, ep->tcp_cid, rc);
+
+ if (rc) {
+ qed_iwarp_destroy_ep(p_hwfn, ep, true);
+ goto err;
+ }
+
+ return rc;
+err:
+ qed_iwarp_cid_cleaned(p_hwfn, cid);
+
+ return rc;
+}
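+/* Illustrative upper-layer usage of qed_iwarp_connect(); hypothetical
+ * names (my_event_cb, my_ctx, nh_mac, dev_mac), not part of this patch,
+ * shown only to clarify the in/out contract:
+ *
+ *	struct qed_iwarp_connect_in in = {};
+ *	struct qed_iwarp_connect_out out;
+ *
+ *	in.qp = qp;
+ *	in.cm_info = *cm_info;     [ip/port/vlan, ord/ird, private data]
+ *	in.mss = path_mss;         [prior to the timestamp adjustment]
+ *	in.event_cb = my_event_cb; [receives QED_IWARP_EVENT_* callbacks]
+ *	in.cb_context = my_ctx;
+ *	ether_addr_copy(in.remote_mac_addr, nh_mac);
+ *	ether_addr_copy(in.local_mac_addr, dev_mac);
+ *	rc = qed_iwarp_connect(rdma_cxt, &in, &out);
+ *	[on success, out.ep_context identifies the new endpoint]
+ */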
+
+static struct qed_iwarp_ep *qed_iwarp_get_free_ep(struct qed_hwfn *p_hwfn)
+{
+ struct qed_iwarp_ep *ep = NULL;
+ int rc;
+
+ spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
+
+ if (list_empty(&p_hwfn->p_rdma_info->iwarp.ep_free_list)) {
+ DP_ERR(p_hwfn, "Ep list is empty\n");
+ goto out;
+ }
+
+ ep = list_first_entry(&p_hwfn->p_rdma_info->iwarp.ep_free_list,
+ struct qed_iwarp_ep, list_entry);
+
+ /* In some cases we could have failed allocating a tcp cid when added
+ * from accept / failure... retry now; this is not the common case.
+ */
+ if (ep->tcp_cid == QED_IWARP_INVALID_TCP_CID) {
+ rc = qed_iwarp_alloc_tcp_cid(p_hwfn, &ep->tcp_cid);
+
+ /* if we fail we could look for another entry with a valid
+ * tcp_cid, but since we don't expect to reach this anyway
+ * it's not worth the handling
+ */
+ if (rc) {
+ ep->tcp_cid = QED_IWARP_INVALID_TCP_CID;
+ ep = NULL;
+ goto out;
+ }
+ }
+
+ list_del(&ep->list_entry);
+
+out:
+ spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
+ return ep;
+}
+
+#define QED_IWARP_MAX_CID_CLEAN_TIME 100
+#define QED_IWARP_MAX_NO_PROGRESS_CNT 5
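+/* Poll every 100ms; bail out after 5 consecutive polls without progress. */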
+
+/* This function waits for all the bits of a bmap to be cleared. As long as
+ * there is progress (i.e. the number of bits left to be cleared decreases),
+ * the function keeps waiting.
+ */
+static int
+qed_iwarp_wait_cid_map_cleared(struct qed_hwfn *p_hwfn, struct qed_bmap *bmap)
+{
+ int prev_weight = 0;
+ int wait_count = 0;
+ int weight = 0;
+
+ weight = bitmap_weight(bmap->bitmap, bmap->max_count);
+ prev_weight = weight;
+
+ while (weight) {
+ msleep(QED_IWARP_MAX_CID_CLEAN_TIME);
+
+ weight = bitmap_weight(bmap->bitmap, bmap->max_count);
+
+ if (prev_weight == weight) {
+ wait_count++;
+ } else {
+ prev_weight = weight;
+ wait_count = 0;
+ }
+
+ if (wait_count > QED_IWARP_MAX_NO_PROGRESS_CNT) {
+ DP_NOTICE(p_hwfn,
+ "%s bitmap wait timed out (%d cids pending)\n",
+ bmap->name, weight);
+ return -EBUSY;
+ }
+ }
+ return 0;
+}
+
+static int qed_iwarp_wait_for_all_cids(struct qed_hwfn *p_hwfn)
+{
+ int rc;
+ int i;
+
+ rc = qed_iwarp_wait_cid_map_cleared(p_hwfn,
+ &p_hwfn->p_rdma_info->tcp_cid_map);
+ if (rc)
+ return rc;
+
+ /* Now free the tcp cids from the main cid map */
+ for (i = 0; i < QED_IWARP_PREALLOC_CNT; i++)
+ qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, i);
+
+ /* Now wait for all cids to be completed */
+ return qed_iwarp_wait_cid_map_cleared(p_hwfn,
+ &p_hwfn->p_rdma_info->cid_map);
+}
+
+static void qed_iwarp_free_prealloc_ep(struct qed_hwfn *p_hwfn)
+{
+ struct qed_iwarp_ep *ep;
+
+ while (!list_empty(&p_hwfn->p_rdma_info->iwarp.ep_free_list)) {
+ spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
+
+ ep = list_first_entry(&p_hwfn->p_rdma_info->iwarp.ep_free_list,
+ struct qed_iwarp_ep, list_entry);
+
+ if (!ep) {
+ spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
+ break;
+ }
+ list_del(&ep->list_entry);
+
+ spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
+
+ if (ep->tcp_cid != QED_IWARP_INVALID_TCP_CID)
+ qed_iwarp_cid_cleaned(p_hwfn, ep->tcp_cid);
+
+ qed_iwarp_destroy_ep(p_hwfn, ep, false);
+ }
+}
+
+static int qed_iwarp_prealloc_ep(struct qed_hwfn *p_hwfn, bool init)
+{
+ struct qed_iwarp_ep *ep;
+ int rc = 0;
+ int count;
+ u32 cid;
+ int i;
+
+ count = init ? QED_IWARP_PREALLOC_CNT : 1;
+ for (i = 0; i < count; i++) {
+ rc = qed_iwarp_create_ep(p_hwfn, &ep);
+ if (rc)
+ return rc;
+
+ /* During initialization we allocate from the main pool;
+ * afterwards we allocate only from the tcp_cid map.
+ */
+ if (init) {
+ rc = qed_iwarp_alloc_cid(p_hwfn, &cid);
+ if (rc)
+ goto err;
+ qed_iwarp_set_tcp_cid(p_hwfn, cid);
+ } else {
+ /* We don't care about the return code; it's ok if
+ * tcp_cid remains invalid... in this case we'll
+ * defer allocation.
+ */
+ qed_iwarp_alloc_tcp_cid(p_hwfn, &cid);
+ }
+
+ ep->tcp_cid = cid;
+
+ spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
+ list_add_tail(&ep->list_entry,
+ &p_hwfn->p_rdma_info->iwarp.ep_free_list);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
+ }
+
+ return rc;
+
+err:
+ qed_iwarp_destroy_ep(p_hwfn, ep, false);
+
+ return rc;
+}
+
+int qed_iwarp_alloc(struct qed_hwfn *p_hwfn)
+{
+ int rc;
+
+ /* Allocate bitmap for tcp cids. These are used by the passive side
+ * to ensure it can allocate, during the dpc, a tcp cid that was
+ * pre-acquired and doesn't require dynamic allocation of ilt.
+ */
+ rc = qed_rdma_bmap_alloc(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map,
+ QED_IWARP_PREALLOC_CNT, "TCP_CID");
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Failed to allocate tcp cid, rc = %d\n", rc);
+ return rc;
+ }
+
+ INIT_LIST_HEAD(&p_hwfn->p_rdma_info->iwarp.ep_free_list);
+ spin_lock_init(&p_hwfn->p_rdma_info->iwarp.iw_lock);
+
+ return qed_iwarp_prealloc_ep(p_hwfn, true);
+}
+
+void qed_iwarp_resc_free(struct qed_hwfn *p_hwfn)
+{
+ qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map, 1);
+}
+
+int qed_iwarp_accept(void *rdma_cxt, struct qed_iwarp_accept_in *iparams)
+{
+ struct qed_hwfn *p_hwfn = rdma_cxt;
+ struct qed_iwarp_ep *ep;
+ u8 mpa_data_size = 0;
+ int rc;
+
+ ep = iparams->ep_context;
+ if (!ep) {
+ DP_ERR(p_hwfn, "Ep Context receive in accept is NULL\n");
+ return -EINVAL;
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) EP(0x%x)\n",
+ iparams->qp->icid, ep->tcp_cid);
+
+ if ((iparams->ord > QED_IWARP_ORD_DEFAULT) ||
+ (iparams->ird > QED_IWARP_IRD_DEFAULT)) {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_RDMA,
+ "QP(0x%x) EP(0x%x) ERROR: Invalid ord(0x%x)/ird(0x%x)\n",
+ iparams->qp->icid,
+ ep->tcp_cid, iparams->ord, iparams->ird);
+ return -EINVAL;
+ }
+
+ qed_iwarp_prealloc_ep(p_hwfn, false);
+
+ ep->cb_context = iparams->cb_context;
+ ep->qp = iparams->qp;
+ ep->qp->ep = ep;
+
+ if (ep->mpa_rev == MPA_NEGOTIATION_TYPE_ENHANCED) {
+ /* Negotiate ord/ird: if the upper layer requested an ord larger
+ * than the ird advertised by the remote, we need to decrease our
+ * ord.
+ */
+ if (iparams->ord > ep->cm_info.ird)
+ iparams->ord = ep->cm_info.ird;
+
+ if ((ep->rtr_type & MPA_RTR_TYPE_ZERO_READ) &&
+ (iparams->ird == 0))
+ iparams->ird = 1;
+ }
+
+ /* Update cm_info ord/ird to be negotiated values */
+ ep->cm_info.ord = iparams->ord;
+ ep->cm_info.ird = iparams->ird;
+
+ qed_iwarp_mpa_v2_set_private(p_hwfn, ep, &mpa_data_size);
+
+ ep->cm_info.private_data = ep->ep_buffer_virt->out_pdata;
+ ep->cm_info.private_data_len = iparams->private_data_len +
+ mpa_data_size;
+
+ memcpy((u8 *)ep->ep_buffer_virt->out_pdata + mpa_data_size,
+ iparams->private_data, iparams->private_data_len);
+
+ rc = qed_iwarp_mpa_offload(p_hwfn, ep);
+ if (rc)
+ qed_iwarp_modify_qp(p_hwfn,
+ iparams->qp, QED_IWARP_QP_STATE_ERROR, 1);
+
+ return rc;
+}
+
+int qed_iwarp_reject(void *rdma_cxt, struct qed_iwarp_reject_in *iparams)
+{
+ struct qed_hwfn *p_hwfn = rdma_cxt;
+ struct qed_iwarp_ep *ep;
+ u8 mpa_data_size = 0;
+
+ ep = iparams->ep_context;
+ if (!ep) {
+ DP_ERR(p_hwfn, "Ep Context receive in reject is NULL\n");
+ return -EINVAL;
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "EP(0x%x)\n", ep->tcp_cid);
+
+ ep->cb_context = iparams->cb_context;
+ ep->qp = NULL;
+
+ qed_iwarp_mpa_v2_set_private(p_hwfn, ep, &mpa_data_size);
+
+ ep->cm_info.private_data = ep->ep_buffer_virt->out_pdata;
+ ep->cm_info.private_data_len = iparams->private_data_len +
+ mpa_data_size;
+
+ memcpy((u8 *)ep->ep_buffer_virt->out_pdata + mpa_data_size,
+ iparams->private_data, iparams->private_data_len);
+
+ return qed_iwarp_mpa_offload(p_hwfn, ep);
+}
+
+static void
+qed_iwarp_print_cm_info(struct qed_hwfn *p_hwfn,
+ struct qed_iwarp_cm_info *cm_info)
+{
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "ip_version = %d\n",
+ cm_info->ip_version);
+
+ if (cm_info->ip_version == QED_TCP_IPV4)
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "remote_ip %pI4h:%x, local_ip %pI4h:%x vlan=%x\n",
+ cm_info->remote_ip, cm_info->remote_port,
+ cm_info->local_ip, cm_info->local_port,
+ cm_info->vlan);
+ else
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "remote_ip %pI6h:%x, local_ip %pI6h:%x vlan=%x\n",
+ cm_info->remote_ip, cm_info->remote_port,
+ cm_info->local_ip, cm_info->local_port,
+ cm_info->vlan);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "private_data_len = %x ord = %d, ird = %d\n",
+ cm_info->private_data_len, cm_info->ord, cm_info->ird);
+}
+
+static int
+qed_iwarp_ll2_post_rx(struct qed_hwfn *p_hwfn,
+ struct qed_iwarp_ll2_buff *buf, u8 handle)
+{
+ int rc;
+
+ rc = qed_ll2_post_rx_buffer(p_hwfn, handle, buf->data_phys_addr,
+ (u16)buf->buff_size, buf, 1);
+ if (rc) {
+ DP_NOTICE(p_hwfn,
+ "Failed to repost rx buffer to ll2 rc = %d, handle=%d\n",
+ rc, handle);
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev, buf->buff_size,
+ buf->data, buf->data_phys_addr);
+ kfree(buf);
+ }
+
+ return rc;
+}
+
+static bool
+qed_iwarp_ep_exists(struct qed_hwfn *p_hwfn, struct qed_iwarp_cm_info *cm_info)
+{
+ struct qed_iwarp_ep *ep = NULL;
+ bool found = false;
+
+ list_for_each_entry(ep,
+ &p_hwfn->p_rdma_info->iwarp.ep_list,
+ list_entry) {
+ if ((ep->cm_info.local_port == cm_info->local_port) &&
+ (ep->cm_info.remote_port == cm_info->remote_port) &&
+ (ep->cm_info.vlan == cm_info->vlan) &&
+ !memcmp(&ep->cm_info.local_ip, cm_info->local_ip,
+ sizeof(cm_info->local_ip)) &&
+ !memcmp(&ep->cm_info.remote_ip, cm_info->remote_ip,
+ sizeof(cm_info->remote_ip))) {
+ found = true;
+ break;
+ }
+ }
+
+ if (found) {
+ DP_NOTICE(p_hwfn,
+ "SYN received on active connection - dropping\n");
+ qed_iwarp_print_cm_info(p_hwfn, cm_info);
+
+ return true;
+ }
+
+ return false;
+}
+
+static struct qed_iwarp_listener *
+qed_iwarp_get_listener(struct qed_hwfn *p_hwfn,
+ struct qed_iwarp_cm_info *cm_info)
+{
+ struct qed_iwarp_listener *listener = NULL;
+ static const u32 ip_zero[4] = { 0, 0, 0, 0 };
+ bool found = false;
+
+ qed_iwarp_print_cm_info(p_hwfn, cm_info);
+
+ list_for_each_entry(listener,
+ &p_hwfn->p_rdma_info->iwarp.listen_list,
+ list_entry) {
+ if (listener->port == cm_info->local_port) {
+ if (!memcmp(listener->ip_addr,
+ ip_zero, sizeof(ip_zero))) {
+ found = true;
+ break;
+ }
+
+ if (!memcmp(listener->ip_addr,
+ cm_info->local_ip,
+ sizeof(cm_info->local_ip)) &&
+ (listener->vlan == cm_info->vlan)) {
+ found = true;
+ break;
+ }
+ }
+ }
+
+ if (found) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "listener found = %p\n",
+ listener);
+ return listener;
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "listener not found\n");
+ return NULL;
+}
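/* Editor's sketch, not part of the patch: the lookup above matches on port
 * first, treats an all-zero ip_addr as a wildcard (any local IP, any VLAN),
 * and otherwise requires an exact IP+VLAN match. A compact standalone model:
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct listener {
	uint32_t ip[4];
	uint16_t port, vlan;
};

static bool listener_matches(const struct listener *l, const uint32_t ip[4],
			     uint16_t port, uint16_t vlan)
{
	static const uint32_t zero[4];

	if (l->port != port)
		return false;
	if (!memcmp(l->ip, zero, sizeof(zero)))
		return true;	/* wildcard listener */
	return !memcmp(l->ip, ip, sizeof(zero)) && l->vlan == vlan;
}

int main(void)
{
	struct listener wild = { .port = 8500 };	/* 0.0.0.0 */
	uint32_t ip[4] = { 0x0a000001, 0, 0, 0 };

	printf("match=%d\n", listener_matches(&wild, ip, 8500, 100)); /* 1 */
	return 0;
}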
+
+static int
+qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn,
+ struct qed_iwarp_cm_info *cm_info,
+ void *buf,
+ u8 *remote_mac_addr,
+ u8 *local_mac_addr,
+ int *payload_len, int *tcp_start_offset)
+{
+ struct vlan_ethhdr *vethh;
+ bool vlan_valid = false;
+ struct ipv6hdr *ip6h;
+ struct ethhdr *ethh;
+ struct tcphdr *tcph;
+ struct iphdr *iph;
+ int eth_hlen;
+ int ip_hlen;
+ int eth_type;
+ int i;
+
+ ethh = buf;
+ eth_type = ntohs(ethh->h_proto);
+ if (eth_type == ETH_P_8021Q) {
+ vlan_valid = true;
+ vethh = (struct vlan_ethhdr *)ethh;
+ cm_info->vlan = ntohs(vethh->h_vlan_TCI) & VLAN_VID_MASK;
+ eth_type = ntohs(vethh->h_vlan_encapsulated_proto);
+ }
+
+ eth_hlen = ETH_HLEN + (vlan_valid ? sizeof(u32) : 0);
+
+ ether_addr_copy(remote_mac_addr, ethh->h_source);
+ ether_addr_copy(local_mac_addr, ethh->h_dest);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "eth_type =%d source mac: %pM\n",
+ eth_type, ethh->h_source);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "eth_hlen=%d destination mac: %pM\n",
+ eth_hlen, ethh->h_dest);
+
+ iph = (struct iphdr *)((u8 *)(ethh) + eth_hlen);
+
+ if (eth_type == ETH_P_IP) {
+ cm_info->local_ip[0] = ntohl(iph->daddr);
+ cm_info->remote_ip[0] = ntohl(iph->saddr);
+ cm_info->ip_version = QED_TCP_IPV4;
+
+ ip_hlen = (iph->ihl) * sizeof(u32);
+ *payload_len = ntohs(iph->tot_len) - ip_hlen;
+ } else if (eth_type == ETH_P_IPV6) {
+ ip6h = (struct ipv6hdr *)iph;
+ for (i = 0; i < 4; i++) {
+ cm_info->local_ip[i] =
+ ntohl(ip6h->daddr.in6_u.u6_addr32[i]);
+ cm_info->remote_ip[i] =
+ ntohl(ip6h->saddr.in6_u.u6_addr32[i]);
+ }
+ cm_info->ip_version = QED_TCP_IPV6;
+
+ ip_hlen = sizeof(*ip6h);
+ *payload_len = ntohs(ip6h->payload_len);
+ } else {
+ DP_NOTICE(p_hwfn, "Unexpected ethertype on ll2 %x\n", eth_type);
+ return -EINVAL;
+ }
+
+ tcph = (struct tcphdr *)((u8 *)iph + ip_hlen);
+
+ if (!tcph->syn) {
+ DP_NOTICE(p_hwfn,
+ "Only SYN type packet expected on this ll2 conn, iph->ihl=%d source=%d dest=%d\n",
+ iph->ihl, tcph->source, tcph->dest);
+ return -EINVAL;
+ }
+
+ cm_info->local_port = ntohs(tcph->dest);
+ cm_info->remote_port = ntohs(tcph->source);
+
+ qed_iwarp_print_cm_info(p_hwfn, cm_info);
+
+ *tcp_start_offset = eth_hlen + ip_hlen;
+
+ return 0;
+}
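/* Editor's sketch, not part of the patch: the offset arithmetic used by the
 * parser above, in isolation, for an IPv4 SYN carrying a VLAN tag and no IP
 * options:
 */
#include <stdio.h>

int main(void)
{
	int eth_hlen = 14 + 4;	/* ETH_HLEN plus one 802.1Q tag */
	int ihl = 5;		/* IPv4 header length in 32-bit words */
	int ip_hlen = ihl * 4;	/* 20 bytes without IP options */

	/* tcp_start_offset as computed at the end of the parser */
	printf("TCP starts at byte %d\n", eth_hlen + ip_hlen);	/* 38 */
	return 0;
}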
+
+static void
+qed_iwarp_ll2_comp_syn_pkt(void *cxt, struct qed_ll2_comp_rx_data *data)
+{
+ struct qed_iwarp_ll2_buff *buf = data->cookie;
+ struct qed_iwarp_listener *listener;
+ struct qed_ll2_tx_pkt_info tx_pkt;
+ struct qed_iwarp_cm_info cm_info;
+ struct qed_hwfn *p_hwfn = cxt;
+ u8 remote_mac_addr[ETH_ALEN];
+ u8 local_mac_addr[ETH_ALEN];
+ struct qed_iwarp_ep *ep;
+ int tcp_start_offset;
+ u8 ts_hdr_size = 0;
+ u8 ll2_syn_handle;
+ int payload_len;
+ u32 hdr_size;
+ int rc;
+
+ memset(&cm_info, 0, sizeof(cm_info));
+ ll2_syn_handle = p_hwfn->p_rdma_info->iwarp.ll2_syn_handle;
+ if (GET_FIELD(data->parse_flags,
+ PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED) &&
+ GET_FIELD(data->parse_flags, PARSING_AND_ERR_FLAGS_L4CHKSMERROR)) {
+ DP_NOTICE(p_hwfn, "Syn packet received with checksum error\n");
+ goto err;
+ }
+
+ rc = qed_iwarp_parse_rx_pkt(p_hwfn, &cm_info, (u8 *)(buf->data) +
+ data->u.placement_offset, remote_mac_addr,
+ local_mac_addr, &payload_len,
+ &tcp_start_offset);
+ if (rc)
+ goto err;
+
+ /* Check if there is a listener for this 4-tuple+vlan */
+ listener = qed_iwarp_get_listener(p_hwfn, &cm_info);
+ if (!listener) {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_RDMA,
+ "SYN received on tuple not listened on parse_flags=%d packet len=%d\n",
+ data->parse_flags, data->length.packet_length);
+
+ memset(&tx_pkt, 0, sizeof(tx_pkt));
+ tx_pkt.num_of_bds = 1;
+ tx_pkt.vlan = data->vlan;
+
+ if (GET_FIELD(data->parse_flags,
+ PARSING_AND_ERR_FLAGS_TAG8021QEXIST))
+ SET_FIELD(tx_pkt.bd_flags,
+ CORE_TX_BD_DATA_VLAN_INSERTION, 1);
+
+ tx_pkt.l4_hdr_offset_w = (data->length.packet_length) >> 2;
+ tx_pkt.tx_dest = QED_LL2_TX_DEST_LB;
+ tx_pkt.first_frag = buf->data_phys_addr +
+ data->u.placement_offset;
+ tx_pkt.first_frag_len = data->length.packet_length;
+ tx_pkt.cookie = buf;
+
+ rc = qed_ll2_prepare_tx_packet(p_hwfn, ll2_syn_handle,
+ &tx_pkt, true);
+
+ if (rc) {
+ DP_NOTICE(p_hwfn,
+ "Can't post SYN back to chip rc=%d\n", rc);
+ goto err;
+ }
+ return;
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Received syn on listening port\n");
+ /* There may be an open ep on this connection if this is a SYN
+ * retransmit... need to make sure there isn't.
+ */
+ if (qed_iwarp_ep_exists(p_hwfn, &cm_info))
+ goto err;
+
+ ep = qed_iwarp_get_free_ep(p_hwfn);
+ if (!ep)
+ goto err;
+
+ spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
+ list_add_tail(&ep->list_entry, &p_hwfn->p_rdma_info->iwarp.ep_list);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
+
+ ether_addr_copy(ep->remote_mac_addr, remote_mac_addr);
+ ether_addr_copy(ep->local_mac_addr, local_mac_addr);
+
+ memcpy(&ep->cm_info, &cm_info, sizeof(ep->cm_info));
+
+ if (p_hwfn->p_rdma_info->iwarp.tcp_flags & QED_IWARP_TS_EN)
+ ts_hdr_size = TIMESTAMP_HEADER_SIZE;
+
+ hdr_size = ((cm_info.ip_version == QED_TCP_IPV4) ? 40 : 60) +
+ ts_hdr_size;
+ ep->mss = p_hwfn->p_rdma_info->iwarp.max_mtu - hdr_size;
+ ep->mss = min_t(u16, QED_IWARP_MAX_FW_MSS, ep->mss);
+
+ ep->event_cb = listener->event_cb;
+ ep->cb_context = listener->cb_context;
+ ep->connect_mode = TCP_CONNECT_PASSIVE;
+
+ ep->syn = buf;
+ ep->syn_ip_payload_length = (u16)payload_len;
+ ep->syn_phy_addr = buf->data_phys_addr + data->u.placement_offset +
+ tcp_start_offset;
+
+ rc = qed_iwarp_tcp_offload(p_hwfn, ep);
+ if (rc) {
+ qed_iwarp_return_ep(p_hwfn, ep);
+ goto err;
+ }
+
+ return;
+err:
+ qed_iwarp_ll2_post_rx(p_hwfn, buf, ll2_syn_handle);
+}
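/* Editor's sketch, not part of the patch: the MSS picked above reserves
 * room in the MTU for the fixed TCP/IP headers (40 bytes IPv4, 60 IPv6)
 * plus the 12-byte timestamp option when QED_IWARP_TS_EN is set, capped at
 * the firmware limit. The 4096 cap below is an assumed stand-in for
 * QED_IWARP_MAX_FW_MSS, whose value is not shown in this patch.
 */
#include <stdio.h>

int main(void)
{
	unsigned int max_mtu = 1500, fw_max_mss = 4096;
	unsigned int hdr_size = 40 + 12;	/* IPv4 + TCP timestamps */
	unsigned int mss = max_mtu - hdr_size;

	if (mss > fw_max_mss)
		mss = fw_max_mss;
	printf("mss=%u\n", mss);	/* prints: mss=1448 */
	return 0;
}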
+
+static void qed_iwarp_ll2_rel_rx_pkt(void *cxt, u8 connection_handle,
+ void *cookie, dma_addr_t rx_buf_addr,
+ bool b_last_packet)
+{
+ struct qed_iwarp_ll2_buff *buffer = cookie;
+ struct qed_hwfn *p_hwfn = cxt;
+
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev, buffer->buff_size,
+ buffer->data, buffer->data_phys_addr);
+ kfree(buffer);
+}
+
+static void qed_iwarp_ll2_comp_tx_pkt(void *cxt, u8 connection_handle,
+ void *cookie, dma_addr_t first_frag_addr,
+ bool b_last_fragment, bool b_last_packet)
+{
+ struct qed_iwarp_ll2_buff *buffer = cookie;
+ struct qed_hwfn *p_hwfn = cxt;
+
+ /* this was originally an rx packet, post it back */
+ qed_iwarp_ll2_post_rx(p_hwfn, buffer, connection_handle);
+}
+
+static void qed_iwarp_ll2_rel_tx_pkt(void *cxt, u8 connection_handle,
+ void *cookie, dma_addr_t first_frag_addr,
+ bool b_last_fragment, bool b_last_packet)
+{
+ struct qed_iwarp_ll2_buff *buffer = cookie;
+ struct qed_hwfn *p_hwfn = cxt;
+
+ if (!buffer)
+ return;
+
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev, buffer->buff_size,
+ buffer->data, buffer->data_phys_addr);
+
+ kfree(buffer);
+}
+
+static int qed_iwarp_ll2_stop(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
+ int rc = 0;
+
+ if (iwarp_info->ll2_syn_handle != QED_IWARP_HANDLE_INVAL) {
+ rc = qed_ll2_terminate_connection(p_hwfn,
+ iwarp_info->ll2_syn_handle);
+ if (rc)
+ DP_INFO(p_hwfn, "Failed to terminate syn connection\n");
+
+ qed_ll2_release_connection(p_hwfn, iwarp_info->ll2_syn_handle);
+ iwarp_info->ll2_syn_handle = QED_IWARP_HANDLE_INVAL;
+ }
+
+ qed_llh_remove_mac_filter(p_hwfn,
+ p_ptt, p_hwfn->p_rdma_info->iwarp.mac_addr);
+ return rc;
+}
+
+static int
+qed_iwarp_ll2_alloc_buffers(struct qed_hwfn *p_hwfn,
+ int num_rx_bufs, int buff_size, u8 ll2_handle)
+{
+ struct qed_iwarp_ll2_buff *buffer;
+ int rc = 0;
+ int i;
+
+ for (i = 0; i < num_rx_bufs; i++) {
+ buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+ if (!buffer) {
+ rc = -ENOMEM;
+ break;
+ }
+
+ buffer->data = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ buff_size,
+ &buffer->data_phys_addr,
+ GFP_KERNEL);
+ if (!buffer->data) {
+ kfree(buffer);
+ rc = -ENOMEM;
+ break;
+ }
+
+ buffer->buff_size = buff_size;
+ rc = qed_iwarp_ll2_post_rx(p_hwfn, buffer, ll2_handle);
+ if (rc)
+ /* buffers will be deallocated by qed_ll2 */
+ break;
+ }
+ return rc;
+}
+
+#define QED_IWARP_MAX_BUF_SIZE(mtu) \
+ ALIGN((mtu) + ETH_HLEN + 2 * VLAN_HLEN + 2 + ETH_CACHE_LINE_SIZE, \
+ ETH_CACHE_LINE_SIZE)
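/* Editor's sketch, not part of the patch: the macro above pads the MTU with
 * an Ethernet header, two VLAN tags and 2 bytes of slack plus one cache
 * line, then rounds up to the cache-line size. Worked through with an
 * assumed 64-byte ETH_CACHE_LINE_SIZE and a 1500-byte MTU:
 */
#include <stdio.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	int mtu = 1500, cl = 64;
	int raw = mtu + 14 + 2 * 4 + 2 + cl;	/* 1588 */

	printf("buf_size=%d\n", ALIGN_UP(raw, cl));	/* 1588 -> 1600 */
	return 0;
}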
+
+static int
+qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
+ struct qed_rdma_start_in_params *params,
+ struct qed_ptt *p_ptt)
+{
+ struct qed_iwarp_info *iwarp_info;
+ struct qed_ll2_acquire_data data;
+ struct qed_ll2_cbs cbs;
+ int rc = 0;
+
+ iwarp_info = &p_hwfn->p_rdma_info->iwarp;
+ iwarp_info->ll2_syn_handle = QED_IWARP_HANDLE_INVAL;
+
+ iwarp_info->max_mtu = params->max_mtu;
+
+ ether_addr_copy(p_hwfn->p_rdma_info->iwarp.mac_addr, params->mac_addr);
+
+ rc = qed_llh_add_mac_filter(p_hwfn, p_ptt, params->mac_addr);
+ if (rc)
+ return rc;
+
+ /* Start SYN connection */
+ cbs.rx_comp_cb = qed_iwarp_ll2_comp_syn_pkt;
+ cbs.rx_release_cb = qed_iwarp_ll2_rel_rx_pkt;
+ cbs.tx_comp_cb = qed_iwarp_ll2_comp_tx_pkt;
+ cbs.tx_release_cb = qed_iwarp_ll2_rel_tx_pkt;
+ cbs.cookie = p_hwfn;
+
+ memset(&data, 0, sizeof(data));
+ data.input.conn_type = QED_LL2_TYPE_IWARP;
+ data.input.mtu = QED_IWARP_MAX_SYN_PKT_SIZE;
+ data.input.rx_num_desc = QED_IWARP_LL2_SYN_RX_SIZE;
+ data.input.tx_num_desc = QED_IWARP_LL2_SYN_TX_SIZE;
+ data.input.tx_max_bds_per_packet = 1; /* will never be fragmented */
+ data.input.tx_tc = PKT_LB_TC;
+ data.input.tx_dest = QED_LL2_TX_DEST_LB;
+ data.p_connection_handle = &iwarp_info->ll2_syn_handle;
+ data.cbs = &cbs;
+
+ rc = qed_ll2_acquire_connection(p_hwfn, &data);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "Failed to acquire LL2 connection\n");
+ qed_llh_remove_mac_filter(p_hwfn, p_ptt, params->mac_addr);
+ return rc;
+ }
+
+ rc = qed_ll2_establish_connection(p_hwfn, iwarp_info->ll2_syn_handle);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "Failed to establish LL2 connection\n");
+ goto err;
+ }
+
+ rc = qed_iwarp_ll2_alloc_buffers(p_hwfn,
+ QED_IWARP_LL2_SYN_RX_SIZE,
+ QED_IWARP_MAX_SYN_PKT_SIZE,
+ iwarp_info->ll2_syn_handle);
+ if (rc)
+ goto err;
+
+ return rc;
+err:
+ qed_iwarp_ll2_stop(p_hwfn, p_ptt);
+
+ return rc;
+}
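/* Editor's sketch, not part of the patch: qed_ll2_acquire_connection() is
 * handed a struct of callbacks plus an opaque cookie, the usual C idiom for
 * inverting control. A minimal standalone analog of that registration
 * pattern (all names invented):
 */
#include <stdio.h>

struct cbs {
	void (*rx_comp)(void *cookie, int len);
	void *cookie;
};

static void my_rx_comp(void *cookie, int len)
{
	printf("%s: rx completion, %d bytes\n", (const char *)cookie, len);
}

int main(void)
{
	struct cbs cbs = { .rx_comp = my_rx_comp, .cookie = "iwarp-syn" };

	cbs.rx_comp(cbs.cookie, 64);	/* what the lower layer would do */
	return 0;
}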
+
+int qed_iwarp_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ struct qed_rdma_start_in_params *params)
+{
+ struct qed_iwarp_info *iwarp_info;
+ u32 rcv_wnd_size;
+
+ iwarp_info = &p_hwfn->p_rdma_info->iwarp;
+
+ iwarp_info->tcp_flags = QED_IWARP_TS_EN;
+ rcv_wnd_size = QED_IWARP_RCV_WND_SIZE_DEF;
+
+ /* rcv_wnd_scale is 0 when rcv_wnd_size equals QED_IWARP_RCV_WND_SIZE_MIN */
+ iwarp_info->rcv_wnd_scale = ilog2(rcv_wnd_size) -
+ ilog2(QED_IWARP_RCV_WND_SIZE_MIN);
+ iwarp_info->crc_needed = QED_IWARP_PARAM_CRC_NEEDED;
+ iwarp_info->mpa_rev = MPA_NEGOTIATION_TYPE_ENHANCED;
+
+ iwarp_info->peer2peer = QED_IWARP_PARAM_P2P;
+
+ iwarp_info->rtr_type = MPA_RTR_TYPE_ZERO_SEND |
+ MPA_RTR_TYPE_ZERO_WRITE |
+ MPA_RTR_TYPE_ZERO_READ;
+
+ spin_lock_init(&p_hwfn->p_rdma_info->iwarp.qp_lock);
+ INIT_LIST_HEAD(&p_hwfn->p_rdma_info->iwarp.ep_list);
+ INIT_LIST_HEAD(&p_hwfn->p_rdma_info->iwarp.listen_list);
+
+ qed_spq_register_async_cb(p_hwfn, PROTOCOLID_IWARP,
+ qed_iwarp_async_event);
+
+ return qed_iwarp_ll2_start(p_hwfn, params, p_ptt);
+}
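/* Editor's sketch, not part of the patch: rcv_wnd_scale is the log2
 * distance between the configured window and the minimum one, so a window
 * equal to the minimum yields scale 0. The 1MB/64KB values below are
 * hypothetical; the patch does not show the actual constants.
 */
#include <stdio.h>

static unsigned int ilog2_u32(unsigned int v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	unsigned int wnd_def = 1U << 20, wnd_min = 1U << 16;

	printf("rcv_wnd_scale=%u\n",
	       ilog2_u32(wnd_def) - ilog2_u32(wnd_min));	/* 4 */
	return 0;
}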
+
+int qed_iwarp_stop(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ int rc;
+
+ qed_iwarp_free_prealloc_ep(p_hwfn);
+ rc = qed_iwarp_wait_for_all_cids(p_hwfn);
+ if (rc)
+ return rc;
+
+ qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_IWARP);
+
+ return qed_iwarp_ll2_stop(p_hwfn, p_ptt);
+}
+
+void qed_iwarp_qp_in_error(struct qed_hwfn *p_hwfn,
+ struct qed_iwarp_ep *ep, u8 fw_return_code)
+{
+ struct qed_iwarp_cm_event_params params;
+
+ qed_iwarp_modify_qp(p_hwfn, ep->qp, QED_IWARP_QP_STATE_ERROR, true);
+
+ params.event = QED_IWARP_EVENT_CLOSE;
+ params.ep_context = ep;
+ params.cm_info = &ep->cm_info;
+ params.status = (fw_return_code == IWARP_QP_IN_ERROR_GOOD_CLOSE) ?
+ 0 : -ECONNRESET;
+
+ ep->state = QED_IWARP_EP_CLOSED;
+ spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
+ list_del(&ep->list_entry);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
+
+ ep->event_cb(ep->cb_context, &params);
+}
+
+void qed_iwarp_exception_received(struct qed_hwfn *p_hwfn,
+ struct qed_iwarp_ep *ep, int fw_ret_code)
+{
+ struct qed_iwarp_cm_event_params params;
+ bool event_cb = false;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "EP(0x%x) fw_ret_code=%d\n",
+ ep->cid, fw_ret_code);
+
+ switch (fw_ret_code) {
+ case IWARP_EXCEPTION_DETECTED_LLP_CLOSED:
+ params.status = 0;
+ params.event = QED_IWARP_EVENT_DISCONNECT;
+ event_cb = true;
+ break;
+ case IWARP_EXCEPTION_DETECTED_LLP_RESET:
+ params.status = -ECONNRESET;
+ params.event = QED_IWARP_EVENT_DISCONNECT;
+ event_cb = true;
+ break;
+ case IWARP_EXCEPTION_DETECTED_RQ_EMPTY:
+ params.event = QED_IWARP_EVENT_RQ_EMPTY;
+ event_cb = true;
+ break;
+ case IWARP_EXCEPTION_DETECTED_IRQ_FULL:
+ params.event = QED_IWARP_EVENT_IRQ_FULL;
+ event_cb = true;
+ break;
+ case IWARP_EXCEPTION_DETECTED_LLP_TIMEOUT:
+ params.event = QED_IWARP_EVENT_LLP_TIMEOUT;
+ event_cb = true;
+ break;
+ case IWARP_EXCEPTION_DETECTED_REMOTE_PROTECTION_ERROR:
+ params.event = QED_IWARP_EVENT_REMOTE_PROTECTION_ERROR;
+ event_cb = true;
+ break;
+ case IWARP_EXCEPTION_DETECTED_CQ_OVERFLOW:
+ params.event = QED_IWARP_EVENT_CQ_OVERFLOW;
+ event_cb = true;
+ break;
+ case IWARP_EXCEPTION_DETECTED_LOCAL_CATASTROPHIC:
+ params.event = QED_IWARP_EVENT_QP_CATASTROPHIC;
+ event_cb = true;
+ break;
+ case IWARP_EXCEPTION_DETECTED_LOCAL_ACCESS_ERROR:
+ params.event = QED_IWARP_EVENT_LOCAL_ACCESS_ERROR;
+ event_cb = true;
+ break;
+ case IWARP_EXCEPTION_DETECTED_REMOTE_OPERATION_ERROR:
+ params.event = QED_IWARP_EVENT_REMOTE_OPERATION_ERROR;
+ event_cb = true;
+ break;
+ case IWARP_EXCEPTION_DETECTED_TERMINATE_RECEIVED:
+ params.event = QED_IWARP_EVENT_TERMINATE_RECEIVED;
+ event_cb = true;
+ break;
+ default:
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Unhandled exception received...fw_ret_code=%d\n",
+ fw_ret_code);
+ break;
+ }
+
+ if (event_cb) {
+ params.ep_context = ep;
+ params.cm_info = &ep->cm_info;
+ ep->event_cb(ep->cb_context, &params);
+ }
+}
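/* Editor's sketch, not part of the patch: most arms of the switch above
 * only select an event (and occasionally a status), so an equivalent
 * table-driven mapping is possible; the codes and table here are invented
 * to show the shape of that alternative, not the driver's actual values.
 */
#include <stdio.h>

enum fw_code { FW_LLP_CLOSED, FW_LLP_RESET, FW_RQ_EMPTY, FW_CODE_MAX };
enum cm_event { EV_DISCONNECT, EV_RQ_EMPTY };

static const struct { enum cm_event ev; int status; } ev_map[FW_CODE_MAX] = {
	[FW_LLP_CLOSED] = { EV_DISCONNECT, 0 },
	[FW_LLP_RESET]  = { EV_DISCONNECT, -104 },	/* -ECONNRESET */
	[FW_RQ_EMPTY]   = { EV_RQ_EMPTY, 0 },
};

int main(void)
{
	enum fw_code code = FW_LLP_RESET;

	printf("event=%d status=%d\n", ev_map[code].ev, ev_map[code].status);
	return 0;
}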
+
+static void
+qed_iwarp_tcp_connect_unsuccessful(struct qed_hwfn *p_hwfn,
+ struct qed_iwarp_ep *ep, u8 fw_return_code)
+{
+ struct qed_iwarp_cm_event_params params;
+
+ memset(&params, 0, sizeof(params));
+ params.event = QED_IWARP_EVENT_ACTIVE_COMPLETE;
+ params.ep_context = ep;
+ params.cm_info = &ep->cm_info;
+ ep->state = QED_IWARP_EP_CLOSED;
+
+ switch (fw_return_code) {
+ case IWARP_CONN_ERROR_TCP_CONNECT_INVALID_PACKET:
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "%s(0x%x) TCP connect got invalid packet\n",
+ QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid);
+ params.status = -ECONNRESET;
+ break;
+ case IWARP_CONN_ERROR_TCP_CONNECTION_RST:
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "%s(0x%x) TCP Connection Reset\n",
+ QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid);
+ params.status = -ECONNRESET;
+ break;
+ case IWARP_CONN_ERROR_TCP_CONNECT_TIMEOUT:
+ DP_NOTICE(p_hwfn, "%s(0x%x) TCP timeout\n",
+ QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid);
+ params.status = -EBUSY;
+ break;
+ case IWARP_CONN_ERROR_MPA_NOT_SUPPORTED_VER:
+ DP_NOTICE(p_hwfn, "%s(0x%x) MPA not supported VER\n",
+ QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid);
+ params.status = -ECONNREFUSED;
+ break;
+ case IWARP_CONN_ERROR_MPA_INVALID_PACKET:
+ DP_NOTICE(p_hwfn, "%s(0x%x) MPA Invalid Packet\n",
+ QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid);
+ params.status = -ECONNRESET;
+ break;
+ default:
+ DP_ERR(p_hwfn,
+ "%s(0x%x) Unexpected return code tcp connect: %d\n",
+ QED_IWARP_CONNECT_MODE_STRING(ep),
+ ep->tcp_cid, fw_return_code);
+ params.status = -ECONNRESET;
+ break;
+ }
+
+ if (ep->connect_mode == TCP_CONNECT_PASSIVE) {
+ ep->tcp_cid = QED_IWARP_INVALID_TCP_CID;
+ qed_iwarp_return_ep(p_hwfn, ep);
+ } else {
+ ep->event_cb(ep->cb_context, &params);
+ spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
+ list_del(&ep->list_entry);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
+ }
+}
+
+void
+qed_iwarp_connect_complete(struct qed_hwfn *p_hwfn,
+ struct qed_iwarp_ep *ep, u8 fw_return_code)
+{
+ u8 ll2_syn_handle = p_hwfn->p_rdma_info->iwarp.ll2_syn_handle;
+
+ if (ep->connect_mode == TCP_CONNECT_PASSIVE) {
+ /* Done with the SYN packet, post back to ll2 rx */
+ qed_iwarp_ll2_post_rx(p_hwfn, ep->syn, ll2_syn_handle);
+
+ ep->syn = NULL;
+
+ /* If the connect failed, the upper layer doesn't know about it yet */
+ if (fw_return_code == RDMA_RETURN_OK)
+ qed_iwarp_mpa_received(p_hwfn, ep);
+ else
+ qed_iwarp_tcp_connect_unsuccessful(p_hwfn, ep,
+ fw_return_code);
+ } else {
+ if (fw_return_code == RDMA_RETURN_OK)
+ qed_iwarp_mpa_offload(p_hwfn, ep);
+ else
+ qed_iwarp_tcp_connect_unsuccessful(p_hwfn, ep,
+ fw_return_code);
+ }
+}
+
+static inline bool
+qed_iwarp_check_ep_ok(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
+{
+ if (!ep || (ep->sig != QED_EP_SIG)) {
+ DP_ERR(p_hwfn, "ERROR ON ASYNC ep=%p\n", ep);
+ return false;
+ }
+
+ return true;
+}
+
+static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn,
+ u8 fw_event_code, u16 echo,
+ union event_ring_data *data,
+ u8 fw_return_code)
+{
+ struct regpair *fw_handle = &data->rdma_data.async_handle;
+ struct qed_iwarp_ep *ep = NULL;
+ u16 cid;
+
+ ep = (struct qed_iwarp_ep *)(uintptr_t)HILO_64(fw_handle->hi,
+ fw_handle->lo);
+
+ switch (fw_event_code) {
+ case IWARP_EVENT_TYPE_ASYNC_CONNECT_COMPLETE:
+ /* Async completion after TCP 3-way handshake */
+ if (!qed_iwarp_check_ep_ok(p_hwfn, ep))
+ return -EINVAL;
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_RDMA,
+ "EP(0x%x) IWARP_EVENT_TYPE_ASYNC_CONNECT_COMPLETE fw_ret_code=%d\n",
+ ep->tcp_cid, fw_return_code);
+ qed_iwarp_connect_complete(p_hwfn, ep, fw_return_code);
+ break;
+ case IWARP_EVENT_TYPE_ASYNC_EXCEPTION_DETECTED:
+ if (!qed_iwarp_check_ep_ok(p_hwfn, ep))
+ return -EINVAL;
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_RDMA,
+ "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_EXCEPTION_DETECTED fw_ret_code=%d\n",
+ ep->cid, fw_return_code);
+ qed_iwarp_exception_received(p_hwfn, ep, fw_return_code);
+ break;
+ case IWARP_EVENT_TYPE_ASYNC_QP_IN_ERROR_STATE:
+ /* Async completion for Close Connection ramrod */
+ if (!qed_iwarp_check_ep_ok(p_hwfn, ep))
+ return -EINVAL;
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_RDMA,
+ "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_QP_IN_ERROR_STATE fw_ret_code=%d\n",
+ ep->cid, fw_return_code);
+ qed_iwarp_qp_in_error(p_hwfn, ep, fw_return_code);
+ break;
+ case IWARP_EVENT_TYPE_ASYNC_ENHANCED_MPA_REPLY_ARRIVED:
+ /* Async event for active side only */
+ if (!qed_iwarp_check_ep_ok(p_hwfn, ep))
+ return -EINVAL;
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_RDMA,
+ "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_MPA_REPLY_ARRIVED fw_ret_code=%d\n",
+ ep->cid, fw_return_code);
+ qed_iwarp_mpa_reply_arrived(p_hwfn, ep);
+ break;
+ case IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_COMPLETE:
+ if (!qed_iwarp_check_ep_ok(p_hwfn, ep))
+ return -EINVAL;
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_RDMA,
+ "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_COMPLETE fw_ret_code=%d\n",
+ ep->cid, fw_return_code);
+ qed_iwarp_mpa_complete(p_hwfn, ep, fw_return_code);
+ break;
+ case IWARP_EVENT_TYPE_ASYNC_CID_CLEANED:
+ cid = (u16)le32_to_cpu(fw_handle->lo);
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "(0x%x)IWARP_EVENT_TYPE_ASYNC_CID_CLEANED\n", cid);
+ qed_iwarp_cid_cleaned(p_hwfn, cid);
+
+ break;
+ case IWARP_EVENT_TYPE_ASYNC_CQ_OVERFLOW:
+ DP_NOTICE(p_hwfn, "IWARP_EVENT_TYPE_ASYNC_CQ_OVERFLOW\n");
+
+ p_hwfn->p_rdma_info->events.affiliated_event(
+ p_hwfn->p_rdma_info->events.context,
+ QED_IWARP_EVENT_CQ_OVERFLOW,
+ (void *)fw_handle);
+ break;
+ default:
+ DP_ERR(p_hwfn, "Received unexpected async iwarp event %d\n",
+ fw_event_code);
+ return -EINVAL;
+ }
+ return 0;
+}
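/* Editor's sketch, not part of the patch: the handler above rebuilds the ep
 * pointer from the two 32-bit halves carried in the event ring (HILO_64)
 * and then sanity-checks it against QED_EP_SIG. The split/rejoin on its
 * own:
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int x;
	uintptr_t p = (uintptr_t)&x;
	uint32_t hi = (uint32_t)((uint64_t)p >> 32);
	uint32_t lo = (uint32_t)p;
	void *back = (void *)(uintptr_t)(((uint64_t)hi << 32) | lo);

	printf("round-trip ok: %d\n", back == (void *)&x);	/* 1 */
	return 0;
}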
+
+int
+qed_iwarp_create_listen(void *rdma_cxt,
+ struct qed_iwarp_listen_in *iparams,
+ struct qed_iwarp_listen_out *oparams)
+{
+ struct qed_hwfn *p_hwfn = rdma_cxt;
+ struct qed_iwarp_listener *listener;
+
+ listener = kzalloc(sizeof(*listener), GFP_KERNEL);
+ if (!listener)
+ return -ENOMEM;
+
+ listener->ip_version = iparams->ip_version;
+ memcpy(listener->ip_addr, iparams->ip_addr, sizeof(listener->ip_addr));
+ listener->port = iparams->port;
+ listener->vlan = iparams->vlan;
+
+ listener->event_cb = iparams->event_cb;
+ listener->cb_context = iparams->cb_context;
+ listener->max_backlog = iparams->max_backlog;
+ oparams->handle = listener;
+
+ spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
+ list_add_tail(&listener->list_entry,
+ &p_hwfn->p_rdma_info->iwarp.listen_list);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_RDMA,
+ "callback=%p handle=%p ip=%x:%x:%x:%x port=0x%x vlan=0x%x\n",
+ listener->event_cb,
+ listener,
+ listener->ip_addr[0],
+ listener->ip_addr[1],
+ listener->ip_addr[2],
+ listener->ip_addr[3], listener->port, listener->vlan);
+
+ return 0;
+}
+
+int qed_iwarp_destroy_listen(void *rdma_cxt, void *handle)
+{
+ struct qed_iwarp_listener *listener = handle;
+ struct qed_hwfn *p_hwfn = rdma_cxt;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "handle=%p\n", handle);
+
+ spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
+ list_del(&listener->list_entry);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
+
+ kfree(listener);
+
+ return 0;
+}
+
+int qed_iwarp_send_rtr(void *rdma_cxt, struct qed_iwarp_send_rtr_in *iparams)
+{
+ struct qed_hwfn *p_hwfn = rdma_cxt;
+ struct qed_sp_init_data init_data;
+ struct qed_spq_entry *p_ent;
+ struct qed_iwarp_ep *ep;
+ struct qed_rdma_qp *qp;
+ int rc;
+
+ ep = iparams->ep_context;
+ if (!ep) {
+ DP_ERR(p_hwfn, "Ep Context receive in send_rtr is NULL\n");
+ return -EINVAL;
+ }
+
+ qp = ep->qp;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) EP(0x%x)\n",
+ qp->icid, ep->tcp_cid);
+
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qp->icid;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_CB;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ IWARP_RAMROD_CMD_ID_MPA_OFFLOAD_SEND_RTR,
+ PROTOCOLID_IWARP, &init_data);
+
+ if (rc)
+ return rc;
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = 0x%x\n", rc);
+
+ return rc;
+}
+
+void
+qed_iwarp_query_qp(struct qed_rdma_qp *qp,
+ struct qed_rdma_query_qp_out_params *out_params)
+{
+ out_params->state = qed_iwarp2roce_state(qp->iwarp_state);
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.h b/drivers/net/ethernet/qlogic/qed/qed_iwarp.h
new file mode 100644
index 000000000000..148ef3c33a5d
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.h
@@ -0,0 +1,189 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015-2017 QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _QED_IWARP_H
+#define _QED_IWARP_H
+
+enum qed_iwarp_qp_state {
+ QED_IWARP_QP_STATE_IDLE,
+ QED_IWARP_QP_STATE_RTS,
+ QED_IWARP_QP_STATE_TERMINATE,
+ QED_IWARP_QP_STATE_CLOSING,
+ QED_IWARP_QP_STATE_ERROR,
+};
+
+enum qed_iwarp_qp_state qed_roce2iwarp_state(enum qed_roce_qp_state state);
+
+#define QED_IWARP_PREALLOC_CNT (256)
+
+#define QED_IWARP_LL2_SYN_TX_SIZE (128)
+#define QED_IWARP_LL2_SYN_RX_SIZE (256)
+#define QED_IWARP_MAX_SYN_PKT_SIZE (128)
+#define QED_IWARP_HANDLE_INVAL (0xff)
+
+struct qed_iwarp_ll2_buff {
+ void *data;
+ dma_addr_t data_phys_addr;
+ u32 buff_size;
+};
+
+struct qed_iwarp_info {
+ struct list_head listen_list; /* qed_iwarp_listener */
+ struct list_head ep_list; /* qed_iwarp_ep */
+ struct list_head ep_free_list; /* pre-allocated ep's */
+ spinlock_t iw_lock; /* for iwarp resources */
+ spinlock_t qp_lock; /* for teardown races */
+ u32 rcv_wnd_scale;
+ u16 max_mtu;
+ u8 mac_addr[ETH_ALEN];
+ u8 crc_needed;
+ u8 tcp_flags;
+ u8 ll2_syn_handle;
+ u8 peer2peer;
+ enum mpa_negotiation_mode mpa_rev;
+ enum mpa_rtr_type rtr_type;
+};
+
+enum qed_iwarp_ep_state {
+ QED_IWARP_EP_INIT,
+ QED_IWARP_EP_MPA_REQ_RCVD,
+ QED_IWARP_EP_MPA_OFFLOADED,
+ QED_IWARP_EP_ESTABLISHED,
+ QED_IWARP_EP_CLOSED
+};
+
+union async_output {
+ struct iwarp_eqe_data_mpa_async_completion mpa_response;
+ struct iwarp_eqe_data_tcp_async_completion mpa_request;
+};
+
+#define QED_MAX_PRIV_DATA_LEN (512)
+struct qed_iwarp_ep_memory {
+ u8 in_pdata[QED_MAX_PRIV_DATA_LEN];
+ u8 out_pdata[QED_MAX_PRIV_DATA_LEN];
+ union async_output async_output;
+};
+
+/* Endpoint structure represents a TCP connection. This connection can be
+ * associated with a QP or not (in which case QP==NULL)
+ */
+struct qed_iwarp_ep {
+ struct list_head list_entry;
+ struct qed_rdma_qp *qp;
+ struct qed_iwarp_ep_memory *ep_buffer_virt;
+ dma_addr_t ep_buffer_phys;
+ enum qed_iwarp_ep_state state;
+ int sig;
+ struct qed_iwarp_cm_info cm_info;
+ enum tcp_connect_mode connect_mode;
+ enum mpa_rtr_type rtr_type;
+ enum mpa_negotiation_mode mpa_rev;
+ u32 tcp_cid;
+ u32 cid;
+ u16 mss;
+ u8 remote_mac_addr[6];
+ u8 local_mac_addr[6];
+ bool mpa_reply_processed;
+
+ /* For Passive side - syn packet related data */
+ u16 syn_ip_payload_length;
+ struct qed_iwarp_ll2_buff *syn;
+ dma_addr_t syn_phy_addr;
+
+ /* The event_cb function is called for asynchronous events associated
+ * with the ep. It is initialized at different entry points depending
+ * on whether the ep is the active or passive side of the tcp
+ * connection. The cb_context is passed to the event_cb function.
+ */
+ iwarp_event_handler event_cb;
+ void *cb_context;
+};
+
+struct qed_iwarp_listener {
+ struct list_head list_entry;
+
+ /* The event_cb function is called for connection requests.
+ * The cb_context is passed to the event_cb function.
+ */
+ iwarp_event_handler event_cb;
+ void *cb_context;
+ u32 max_backlog;
+ u32 ip_addr[4];
+ u16 port;
+ u16 vlan;
+ u8 ip_version;
+};
+
+int qed_iwarp_alloc(struct qed_hwfn *p_hwfn);
+
+int qed_iwarp_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ struct qed_rdma_start_in_params *params);
+
+int qed_iwarp_stop(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+
+void qed_iwarp_resc_free(struct qed_hwfn *p_hwfn);
+
+void qed_iwarp_init_devinfo(struct qed_hwfn *p_hwfn);
+
+void qed_iwarp_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+
+int qed_iwarp_create_qp(struct qed_hwfn *p_hwfn,
+ struct qed_rdma_qp *qp,
+ struct qed_rdma_create_qp_out_params *out_params);
+
+int qed_iwarp_modify_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp,
+ enum qed_iwarp_qp_state new_state, bool internal);
+
+int qed_iwarp_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp);
+
+int qed_iwarp_fw_destroy(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp);
+
+void qed_iwarp_query_qp(struct qed_rdma_qp *qp,
+ struct qed_rdma_query_qp_out_params *out_params);
+
+int
+qed_iwarp_connect(void *rdma_cxt,
+ struct qed_iwarp_connect_in *iparams,
+ struct qed_iwarp_connect_out *oparams);
+
+int
+qed_iwarp_create_listen(void *rdma_cxt,
+ struct qed_iwarp_listen_in *iparams,
+ struct qed_iwarp_listen_out *oparams);
+
+int qed_iwarp_accept(void *rdma_cxt, struct qed_iwarp_accept_in *iparams);
+
+int qed_iwarp_reject(void *rdma_cxt, struct qed_iwarp_reject_in *iparams);
+int qed_iwarp_destroy_listen(void *rdma_cxt, void *handle);
+
+int qed_iwarp_send_rtr(void *rdma_cxt, struct qed_iwarp_send_rtr_in *iparams);
+
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
index 746fed4099c8..0ba5ec8a9814 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
@@ -43,7 +43,6 @@
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/string.h>
-#include <linux/version.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/bug.h>
@@ -66,26 +65,161 @@
#define QED_MAX_SGES_NUM 16
#define CRC32_POLY 0x1edc6f41
+struct qed_l2_info {
+ u32 queues;
+ unsigned long **pp_qid_usage;
+
+ /* The lock is meant to synchronize access to the qid usage */
+ struct mutex lock;
+};
+
+int qed_l2_alloc(struct qed_hwfn *p_hwfn)
+{
+ struct qed_l2_info *p_l2_info;
+ unsigned long **pp_qids;
+ u32 i;
+
+ if (!QED_IS_L2_PERSONALITY(p_hwfn))
+ return 0;
+
+ p_l2_info = kzalloc(sizeof(*p_l2_info), GFP_KERNEL);
+ if (!p_l2_info)
+ return -ENOMEM;
+ p_hwfn->p_l2_info = p_l2_info;
+
+ if (IS_PF(p_hwfn->cdev)) {
+ p_l2_info->queues = RESC_NUM(p_hwfn, QED_L2_QUEUE);
+ } else {
+ u8 rx = 0, tx = 0;
+
+ qed_vf_get_num_rxqs(p_hwfn, &rx);
+ qed_vf_get_num_txqs(p_hwfn, &tx);
+
+ p_l2_info->queues = max_t(u8, rx, tx);
+ }
+
+ pp_qids = kzalloc(sizeof(unsigned long *) * p_l2_info->queues,
+ GFP_KERNEL);
+ if (!pp_qids)
+ return -ENOMEM;
+ p_l2_info->pp_qid_usage = pp_qids;
+
+ for (i = 0; i < p_l2_info->queues; i++) {
+ pp_qids[i] = kzalloc(MAX_QUEUES_PER_QZONE / 8, GFP_KERNEL);
+ if (!pp_qids[i])
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+void qed_l2_setup(struct qed_hwfn *p_hwfn)
+{
+ if (p_hwfn->hw_info.personality != QED_PCI_ETH &&
+ p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE)
+ return;
+
+ mutex_init(&p_hwfn->p_l2_info->lock);
+}
+
+void qed_l2_free(struct qed_hwfn *p_hwfn)
+{
+ u32 i;
+
+ if (p_hwfn->hw_info.personality != QED_PCI_ETH &&
+ p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE)
+ return;
+
+ if (!p_hwfn->p_l2_info)
+ return;
+
+ if (!p_hwfn->p_l2_info->pp_qid_usage)
+ goto out_l2_info;
+
+ /* Free until hit first uninitialized entry */
+ for (i = 0; i < p_hwfn->p_l2_info->queues; i++) {
+ if (!p_hwfn->p_l2_info->pp_qid_usage[i])
+ break;
+ kfree(p_hwfn->p_l2_info->pp_qid_usage[i]);
+ }
+
+ kfree(p_hwfn->p_l2_info->pp_qid_usage);
+
+out_l2_info:
+ kfree(p_hwfn->p_l2_info);
+ p_hwfn->p_l2_info = NULL;
+}
+
+static bool qed_eth_queue_qid_usage_add(struct qed_hwfn *p_hwfn,
+ struct qed_queue_cid *p_cid)
+{
+ struct qed_l2_info *p_l2_info = p_hwfn->p_l2_info;
+ u16 queue_id = p_cid->rel.queue_id;
+ bool b_rc = true;
+ u8 first;
+
+ mutex_lock(&p_l2_info->lock);
+
+ if (queue_id >= p_l2_info->queues) {
+ DP_NOTICE(p_hwfn,
+ "Requested to increase usage for qzone %04x out of %08x\n",
+ queue_id, p_l2_info->queues);
+ b_rc = false;
+ goto out;
+ }
+
+ first = (u8)find_first_zero_bit(p_l2_info->pp_qid_usage[queue_id],
+ MAX_QUEUES_PER_QZONE);
+ if (first >= MAX_QUEUES_PER_QZONE) {
+ b_rc = false;
+ goto out;
+ }
+
+ __set_bit(first, p_l2_info->pp_qid_usage[queue_id]);
+ p_cid->qid_usage_idx = first;
+
+out:
+ mutex_unlock(&p_l2_info->lock);
+ return b_rc;
+}
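/* Editor's sketch, not part of the patch: qid allocation above finds the
 * first clear bit in a per-queue-zone bitmap of MAX_QUEUES_PER_QZONE bits
 * (one unsigned long). A userspace analog of find_first_zero_bit + set:
 */
#include <stdio.h>

static int alloc_qid(unsigned long *usage, int max)
{
	for (int i = 0; i < max; i++) {
		if (!(*usage & (1UL << i))) {
			*usage |= 1UL << i;	/* claim this index */
			return i;
		}
	}
	return -1;	/* queue zone exhausted */
}

int main(void)
{
	unsigned long usage = 0;
	int max = 8 * (int)sizeof(unsigned long);

	printf("first=%d second=%d\n",
	       alloc_qid(&usage, max), alloc_qid(&usage, max));	/* 0 1 */
	return 0;
}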
+
+static void qed_eth_queue_qid_usage_del(struct qed_hwfn *p_hwfn,
+ struct qed_queue_cid *p_cid)
+{
+ mutex_lock(&p_hwfn->p_l2_info->lock);
+
+ clear_bit(p_cid->qid_usage_idx,
+ p_hwfn->p_l2_info->pp_qid_usage[p_cid->rel.queue_id]);
+
+ mutex_unlock(&p_hwfn->p_l2_info->lock);
+}
+
void qed_eth_queue_cid_release(struct qed_hwfn *p_hwfn,
struct qed_queue_cid *p_cid)
{
- /* VFs' CIDs are 0-based in PF-view, and uninitialized on VF */
- if (!p_cid->is_vf && IS_PF(p_hwfn->cdev))
- qed_cxt_release_cid(p_hwfn, p_cid->cid);
+ bool b_legacy_vf = !!(p_cid->vf_legacy & QED_QCID_LEGACY_VF_CID);
+
+ if (IS_PF(p_hwfn->cdev) && !b_legacy_vf)
+ _qed_cxt_release_cid(p_hwfn, p_cid->cid, p_cid->vfid);
+
+ /* For PF's VFs we maintain the index inside queue-zone in IOV */
+ if (p_cid->vfid == QED_QUEUE_CID_SELF)
+ qed_eth_queue_qid_usage_del(p_hwfn, p_cid);
+
vfree(p_cid);
}
/* The internal is only meant to be directly called by PFs initializing CIDs
* for their VFs.
*/
-struct qed_queue_cid *
+static struct qed_queue_cid *
_qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn,
u16 opaque_fid,
u32 cid,
- u8 vf_qid,
- struct qed_queue_start_common_params *p_params)
+ struct qed_queue_start_common_params *p_params,
+ bool b_is_rx,
+ struct qed_queue_cid_vf_params *p_vf_params)
{
- bool b_is_same = (p_hwfn->hw_info.opaque_fid == opaque_fid);
struct qed_queue_cid *p_cid;
int rc;
@@ -96,10 +230,25 @@ _qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn,
p_cid->opaque_fid = opaque_fid;
p_cid->cid = cid;
- p_cid->vf_qid = vf_qid;
- p_cid->rel = *p_params;
p_cid->p_owner = p_hwfn;
+ /* Fill in parameters */
+ p_cid->rel.vport_id = p_params->vport_id;
+ p_cid->rel.queue_id = p_params->queue_id;
+ p_cid->rel.stats_id = p_params->stats_id;
+ p_cid->sb_igu_id = p_params->p_sb->igu_sb_id;
+ p_cid->b_is_rx = b_is_rx;
+ p_cid->sb_idx = p_params->sb_idx;
+
+ /* Fill-in bits related to VFs' queues if information was provided */
+ if (p_vf_params) {
+ p_cid->vfid = p_vf_params->vfid;
+ p_cid->vf_qid = p_vf_params->vf_qid;
+ p_cid->vf_legacy = p_vf_params->vf_legacy;
+ } else {
+ p_cid->vfid = QED_QUEUE_CID_SELF;
+ }
+
/* Don't try calculating the absolute indices for VFs */
if (IS_VF(p_hwfn->cdev)) {
p_cid->abs = p_cid->rel;
@@ -121,7 +270,7 @@ _qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn,
/* In case of a PF configuring its VF's queues, the stats-id is already
* absolute [since there's a single index that's suitable per-VF].
*/
- if (b_is_same) {
+ if (p_cid->vfid == QED_QUEUE_CID_SELF) {
rc = qed_fw_vport(p_hwfn, p_cid->rel.stats_id,
&p_cid->abs.stats_id);
if (rc)
@@ -130,27 +279,29 @@ _qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn,
p_cid->abs.stats_id = p_cid->rel.stats_id;
}
- /* SBs relevant information was already provided as absolute */
- p_cid->abs.sb = p_cid->rel.sb;
- p_cid->abs.sb_idx = p_cid->rel.sb_idx;
-
- /* This is tricky - we're actually interested in whehter this is a PF
- * entry meant for the VF.
- */
- if (!b_is_same)
- p_cid->is_vf = true;
out:
+ /* VF-images have provided the qid_usage_idx on their own.
+ * Otherwise, we need to allocate a unique one.
+ */
+ if (!p_vf_params) {
+ if (!qed_eth_queue_qid_usage_add(p_hwfn, p_cid))
+ goto fail;
+ } else {
+ p_cid->qid_usage_idx = p_vf_params->qid_usage_idx;
+ }
+
DP_VERBOSE(p_hwfn,
QED_MSG_SP,
- "opaque_fid: %04x CID %08x vport %02x [%02x] qzone %04x [%04x] stats %02x [%02x] SB %04x PI %02x\n",
+ "opaque_fid: %04x CID %08x vport %02x [%02x] qzone %04x.%02x [%04x] stats %02x [%02x] SB %04x PI %02x\n",
p_cid->opaque_fid,
p_cid->cid,
p_cid->rel.vport_id,
p_cid->abs.vport_id,
p_cid->rel.queue_id,
+ p_cid->qid_usage_idx,
p_cid->abs.queue_id,
p_cid->rel.stats_id,
- p_cid->abs.stats_id, p_cid->abs.sb, p_cid->abs.sb_idx);
+ p_cid->abs.stats_id, p_cid->sb_igu_id, p_cid->sb_idx);
return p_cid;
@@ -159,32 +310,61 @@ fail:
return NULL;
}
-static struct qed_queue_cid *qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn,
- u16 opaque_fid, struct
- qed_queue_start_common_params
- *p_params)
+struct qed_queue_cid *
+qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn,
+ u16 opaque_fid,
+ struct qed_queue_start_common_params *p_params,
+ bool b_is_rx,
+ struct qed_queue_cid_vf_params *p_vf_params)
{
struct qed_queue_cid *p_cid;
+ u8 vfid = QED_CXT_PF_CID;
+ bool b_legacy_vf = false;
u32 cid = 0;
+ /* In case of legacy VFs, the CID can be derived from the additional
+ * VF parameters - the VF assumes queue X uses CID X, so we can simply
+ * use the vf_qid for this purpose as well.
+ */
+ if (p_vf_params) {
+ vfid = p_vf_params->vfid;
+
+ if (p_vf_params->vf_legacy & QED_QCID_LEGACY_VF_CID) {
+ b_legacy_vf = true;
+ cid = p_vf_params->vf_qid;
+ }
+ }
+
/* Get a unique firmware CID for this queue, in case it's a PF.
* VF's don't need a CID as the queue configuration will be done
* by PF.
*/
- if (IS_PF(p_hwfn->cdev)) {
- if (qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, &cid)) {
+ if (IS_PF(p_hwfn->cdev) && !b_legacy_vf) {
+ if (_qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH,
+ &cid, vfid)) {
DP_NOTICE(p_hwfn, "Failed to acquire cid\n");
return NULL;
}
}
- p_cid = _qed_eth_queue_to_cid(p_hwfn, opaque_fid, cid, 0, p_params);
- if (!p_cid && IS_PF(p_hwfn->cdev))
- qed_cxt_release_cid(p_hwfn, cid);
+ p_cid = _qed_eth_queue_to_cid(p_hwfn, opaque_fid, cid,
+ p_params, b_is_rx, p_vf_params);
+ if (!p_cid && IS_PF(p_hwfn->cdev) && !b_legacy_vf)
+ _qed_cxt_release_cid(p_hwfn, cid, vfid);
return p_cid;
}
+static struct qed_queue_cid *
+qed_eth_queue_to_cid_pf(struct qed_hwfn *p_hwfn,
+ u16 opaque_fid,
+ bool b_is_rx,
+ struct qed_queue_start_common_params *p_params)
+{
+ return qed_eth_queue_to_cid(p_hwfn, opaque_fid, p_params, b_is_rx,
+ NULL);
+}
+
int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
struct qed_sp_vport_start_params *p_params)
{
@@ -682,7 +862,7 @@ int qed_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
DP_VERBOSE(p_hwfn, QED_MSG_SP,
"opaque_fid=0x%x, cid=0x%x, rx_qzone=0x%x, vport_id=0x%x, sb_id=0x%x\n",
p_cid->opaque_fid, p_cid->cid,
- p_cid->abs.queue_id, p_cid->abs.vport_id, p_cid->abs.sb);
+ p_cid->abs.queue_id, p_cid->abs.vport_id, p_cid->sb_igu_id);
/* Get SPQ entry */
memset(&init_data, 0, sizeof(init_data));
@@ -698,8 +878,8 @@ int qed_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
p_ramrod = &p_ent->ramrod.rx_queue_start;
- p_ramrod->sb_id = cpu_to_le16(p_cid->abs.sb);
- p_ramrod->sb_index = p_cid->abs.sb_idx;
+ p_ramrod->sb_id = cpu_to_le16(p_cid->sb_igu_id);
+ p_ramrod->sb_index = p_cid->sb_idx;
p_ramrod->vport_id = p_cid->abs.vport_id;
p_ramrod->stats_counter_id = p_cid->abs.stats_id;
p_ramrod->rx_queue_id = cpu_to_le16(p_cid->abs.queue_id);
@@ -712,13 +892,15 @@ int qed_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size);
DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, cqe_pbl_addr);
- if (p_cid->is_vf) {
+ if (p_cid->vfid != QED_QUEUE_CID_SELF) {
+ bool b_legacy_vf = !!(p_cid->vf_legacy &
+ QED_QCID_LEGACY_VF_RX_PROD);
+
p_ramrod->vf_rx_prod_index = p_cid->vf_qid;
DP_VERBOSE(p_hwfn, QED_MSG_SP,
"Queue%s is meant for VF rxq[%02x]\n",
- !!p_cid->b_legacy_vf ? " [legacy]" : "",
- p_cid->vf_qid);
- p_ramrod->vf_rx_prod_use_zone_a = !!p_cid->b_legacy_vf;
+ b_legacy_vf ? " [legacy]" : "", p_cid->vf_qid);
+ p_ramrod->vf_rx_prod_use_zone_a = b_legacy_vf;
}
return qed_spq_post(p_hwfn, p_ent, NULL);
@@ -762,7 +944,7 @@ qed_eth_rx_queue_start(struct qed_hwfn *p_hwfn,
int rc;
/* Allocate a CID for the queue */
- p_cid = qed_eth_queue_to_cid(p_hwfn, opaque_fid, p_params);
+ p_cid = qed_eth_queue_to_cid_pf(p_hwfn, opaque_fid, true, p_params);
if (!p_cid)
return -ENOMEM;
@@ -864,10 +1046,11 @@ qed_eth_pf_rx_queue_stop(struct qed_hwfn *p_hwfn,
/* Cleaning the queue requires the completion to arrive there.
* In addition, VFs require the answer to come as eqe to PF.
*/
- p_ramrod->complete_cqe_flg = (!p_cid->is_vf &&
+ p_ramrod->complete_cqe_flg = ((p_cid->vfid == QED_QUEUE_CID_SELF) &&
!b_eq_completion_only) ||
b_cqe_completion;
- p_ramrod->complete_event_flg = p_cid->is_vf || b_eq_completion_only;
+ p_ramrod->complete_event_flg = (p_cid->vfid != QED_QUEUE_CID_SELF) ||
+ b_eq_completion_only;
return qed_spq_post(p_hwfn, p_ent, NULL);
}
@@ -916,8 +1099,8 @@ qed_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
p_ramrod = &p_ent->ramrod.tx_queue_start;
p_ramrod->vport_id = p_cid->abs.vport_id;
- p_ramrod->sb_id = cpu_to_le16(p_cid->abs.sb);
- p_ramrod->sb_index = p_cid->abs.sb_idx;
+ p_ramrod->sb_id = cpu_to_le16(p_cid->sb_igu_id);
+ p_ramrod->sb_index = p_cid->sb_idx;
p_ramrod->stats_counter_id = p_cid->abs.stats_id;
p_ramrod->queue_zone_id = cpu_to_le16(p_cid->abs.queue_id);
@@ -966,7 +1149,7 @@ qed_eth_tx_queue_start(struct qed_hwfn *p_hwfn,
struct qed_queue_cid *p_cid;
int rc;
- p_cid = qed_eth_queue_to_cid(p_hwfn, opaque_fid, p_params);
+ p_cid = qed_eth_queue_to_cid_pf(p_hwfn, opaque_fid, false, p_params);
if (!p_cid)
return -EINVAL;
@@ -1044,19 +1227,6 @@ static enum eth_filter_action qed_filter_action(enum qed_filter_opcode opcode)
return action;
}
-static void qed_set_fw_mac_addr(__le16 *fw_msb,
- __le16 *fw_mid,
- __le16 *fw_lsb,
- u8 *mac)
-{
- ((u8 *)fw_msb)[0] = mac[1];
- ((u8 *)fw_msb)[1] = mac[0];
- ((u8 *)fw_mid)[0] = mac[3];
- ((u8 *)fw_mid)[1] = mac[2];
- ((u8 *)fw_lsb)[0] = mac[5];
- ((u8 *)fw_lsb)[1] = mac[4];
-}
-
static int
qed_filter_ucast_common(struct qed_hwfn *p_hwfn,
u16 opaque_fid,
@@ -1935,15 +2105,26 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev,
ether_addr_copy(info->port_mac,
cdev->hwfns[0].hw_info.hw_mac_addr);
+
+ info->xdp_supported = true;
} else {
- qed_vf_get_num_rxqs(QED_LEADING_HWFN(cdev), &info->num_queues);
- if (cdev->num_hwfns > 1) {
- u8 queues = 0;
+ u16 total_cids = 0;
- qed_vf_get_num_rxqs(&cdev->hwfns[1], &queues);
+ /* Determine queues & XDP support */
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+ u8 queues, cids;
+
+ qed_vf_get_num_cids(p_hwfn, &cids);
+ qed_vf_get_num_rxqs(p_hwfn, &queues);
info->num_queues += queues;
+ total_cids += cids;
}
+ /* Enable VF XDP in case PF guarantees sufficient connections */
+ if (total_cids >= info->num_queues * 3)
+ info->xdp_supported = true;
+
qed_vf_get_num_vlan_filters(&cdev->hwfns[0],
(u8 *)&info->num_vlan_filters);
qed_vf_get_num_mac_filters(&cdev->hwfns[0],
@@ -2194,9 +2375,9 @@ static int qed_start_rxq(struct qed_dev *cdev,
}
DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
- "Started RX-Q %d [rss_num %d] on V-PORT %d and SB %d\n",
+ "Started RX-Q %d [rss_num %d] on V-PORT %d and SB igu %d\n",
p_params->queue_id, rss_num, p_params->vport_id,
- p_params->sb);
+ p_params->p_sb->igu_sb_id);
return 0;
}
@@ -2244,9 +2425,9 @@ static int qed_start_txq(struct qed_dev *cdev,
}
DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
- "Started TX-Q %d [rss_num %d] on V-PORT %d and SB %d\n",
+ "Started TX-Q %d [rss_num %d] on V-PORT %d and SB igu %d\n",
p_params->queue_id, rss_num, p_params->vport_id,
- p_params->sb);
+ p_params->p_sb->igu_sb_id);
return 0;
}
@@ -2301,14 +2482,25 @@ static int qed_tunn_configure(struct qed_dev *cdev,
for_each_hwfn(cdev, i) {
struct qed_hwfn *hwfn = &cdev->hwfns[i];
+ struct qed_ptt *p_ptt;
struct qed_tunnel_info *tun;
tun = &hwfn->cdev->tunnel;
+ if (IS_PF(cdev)) {
+ p_ptt = qed_ptt_acquire(hwfn);
+ if (!p_ptt)
+ return -EAGAIN;
+ } else {
+ p_ptt = NULL;
+ }
- rc = qed_sp_pf_update_tunn_cfg(hwfn, &tunn_info,
+ rc = qed_sp_pf_update_tunn_cfg(hwfn, p_ptt, &tunn_info,
QED_SPQ_MODE_EBLOCK, NULL);
- if (rc)
+ if (rc) {
+ if (IS_PF(cdev))
+ qed_ptt_release(hwfn, p_ptt);
return rc;
+ }
if (IS_PF_SRIOV(hwfn)) {
u16 vxlan_port, geneve_port;
@@ -2325,6 +2517,8 @@ static int qed_tunn_configure(struct qed_dev *cdev,
qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
}
+ if (IS_PF(cdev))
+ qed_ptt_release(hwfn, p_ptt);
}
return 0;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.h b/drivers/net/ethernet/qlogic/qed/qed_l2.h
index 6f44229899eb..f8f09aadced7 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.h
@@ -277,40 +277,87 @@ void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats);
void qed_reset_vport_stats(struct qed_dev *cdev);
-struct qed_queue_cid {
- /* 'Relative' is a relative term ;-). Usually the indices [not counting
- * SBs] would be PF-relative, but there are some cases where that isn't
- * the case - specifically for a PF configuring its VF indices it's
- * possible some fields [E.g., stats-id] in 'rel' would already be abs.
+#define MAX_QUEUES_PER_QZONE (sizeof(unsigned long) * 8)
+#define QED_QUEUE_CID_SELF (0xff)
+
+/* Almost identical to the qed_queue_start_common_params,
+ * but here we maintain the SB index in IGU CAM.
+ */
+struct qed_queue_cid_params {
+ u8 vport_id;
+ u16 queue_id;
+ u8 stats_id;
+};
+
+/* Additional parameters required for initialization of the queue_cid
+ * and are relevant only for a PF initializing one for its VFs.
+ */
+struct qed_queue_cid_vf_params {
+ /* Should match the VF's relative index */
+ u8 vfid;
+
+ /* 0-based queue index. Should reflect the relative qzone the
+ * VF thinks is associated with it [in its range].
+ */
+ u8 vf_qid;
+
+ /* Indicates a VF is legacy, making it differ in several things:
+ * - Producers would be placed in a different place.
+ * - Makes assumptions regarding the CIDs.
*/
- struct qed_queue_start_common_params rel;
- struct qed_queue_start_common_params abs;
+ u8 vf_legacy;
+
+ u8 qid_usage_idx;
+};
+
+struct qed_queue_cid {
+ /* For stats-id, the `rel' is actually absolute as well */
+ struct qed_queue_cid_params rel;
+ struct qed_queue_cid_params abs;
+
+ /* These have no 'relative' meaning */
+ u16 sb_igu_id;
+ u8 sb_idx;
+
u32 cid;
u16 opaque_fid;
+ bool b_is_rx;
+
/* VFs queues are mapped differently, so we need to know the
* relative queue associated with them [0-based].
* Notice this is relevant on the *PF* queue-cid of its VF's queues,
* and not on the VF itself.
*/
- bool is_vf;
+ u8 vfid;
u8 vf_qid;
- /* Legacy VFs might have Rx producer located elsewhere */
- bool b_legacy_vf;
+ /* We need an additional index to differentiate between queues opened
+ * for same queue-zone, as VFs would have to communicate the info
+ * to the PF [otherwise PF has no way to differentiate].
+ */
+ u8 qid_usage_idx;
+
+ u8 vf_legacy;
+#define QED_QCID_LEGACY_VF_RX_PROD (BIT(0))
+#define QED_QCID_LEGACY_VF_CID (BIT(1))
struct qed_hwfn *p_owner;
};
+int qed_l2_alloc(struct qed_hwfn *p_hwfn);
+void qed_l2_setup(struct qed_hwfn *p_hwfn);
+void qed_l2_free(struct qed_hwfn *p_hwfn);
+
void qed_eth_queue_cid_release(struct qed_hwfn *p_hwfn,
struct qed_queue_cid *p_cid);
-struct qed_queue_cid *_qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn,
- u16 opaque_fid,
- u32 cid,
- u8 vf_qid,
- struct qed_queue_start_common_params
- *p_params);
+struct qed_queue_cid *
+qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn,
+ u16 opaque_fid,
+ struct qed_queue_start_common_params *p_params,
+ bool b_is_rx,
+ struct qed_queue_cid_vf_params *p_vf_params);
int
qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index 09c86411918c..c06ad4f0758e 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -38,7 +38,6 @@
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/stddef.h>
-#include <linux/version.h>
#include <linux/workqueue.h>
#include <net/ipv6.h>
#include <linux/bitops.h>
@@ -62,7 +61,7 @@
#include "qed_ooo.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
-#include "qed_roce.h"
+#include "qed_rdma.h"
#define QED_LL2_RX_REGISTERED(ll2) ((ll2)->rx_queue.b_cb_registred)
#define QED_LL2_TX_REGISTERED(ll2) ((ll2)->tx_queue.b_cb_registred)
@@ -74,7 +73,6 @@ struct qed_cb_ll2_info {
int rx_cnt;
u32 rx_size;
u8 handle;
- bool frags_mapped;
/* Lock protecting LL2 buffer lists in sleepless context */
spinlock_t lock;
@@ -90,13 +88,14 @@ struct qed_ll2_buffer {
dma_addr_t phys_addr;
};
-static void qed_ll2b_complete_tx_packet(struct qed_hwfn *p_hwfn,
+static void qed_ll2b_complete_tx_packet(void *cxt,
u8 connection_handle,
void *cookie,
dma_addr_t first_frag_addr,
bool b_last_fragment,
bool b_last_packet)
{
+ struct qed_hwfn *p_hwfn = cxt;
struct qed_dev *cdev = p_hwfn->cdev;
struct sk_buff *skb = cookie;
@@ -108,12 +107,6 @@ static void qed_ll2b_complete_tx_packet(struct qed_hwfn *p_hwfn,
cdev->ll2->cbs->tx_cb(cdev->ll2->cb_cookie, skb,
b_last_fragment);
- if (cdev->ll2->frags_mapped)
- /* Case where mapped frags were received, need to
- * free skb with nr_frags marked as 0
- */
- skb_shinfo(skb)->nr_frags = 0;
-
dev_kfree_skb_any(skb);
}
@@ -165,42 +158,34 @@ static void qed_ll2_kill_buffers(struct qed_dev *cdev)
qed_ll2_dealloc_buffer(cdev, buffer);
}
-static void qed_ll2b_complete_rx_packet(struct qed_hwfn *p_hwfn,
- u8 connection_handle,
- struct qed_ll2_rx_packet *p_pkt,
- struct core_rx_fast_path_cqe *p_cqe,
- bool b_last_packet)
+void qed_ll2b_complete_rx_packet(void *cxt, struct qed_ll2_comp_rx_data *data)
{
- u16 packet_length = le16_to_cpu(p_cqe->packet_length);
- struct qed_ll2_buffer *buffer = p_pkt->cookie;
+ struct qed_hwfn *p_hwfn = cxt;
+ struct qed_ll2_buffer *buffer = data->cookie;
struct qed_dev *cdev = p_hwfn->cdev;
- u16 vlan = le16_to_cpu(p_cqe->vlan);
- u32 opaque_data_0, opaque_data_1;
- u8 pad = p_cqe->placement_offset;
dma_addr_t new_phys_addr;
struct sk_buff *skb;
bool reuse = false;
int rc = -EINVAL;
u8 *new_data;
- opaque_data_0 = le32_to_cpu(p_cqe->opaque_data.data[0]);
- opaque_data_1 = le32_to_cpu(p_cqe->opaque_data.data[1]);
-
DP_VERBOSE(p_hwfn,
(NETIF_MSG_RX_STATUS | QED_MSG_STORAGE | NETIF_MSG_PKTDATA),
"Got an LL2 Rx completion: [Buffer at phys 0x%llx, offset 0x%02x] Length 0x%04x Parse_flags 0x%04x vlan 0x%04x Opaque data [0x%08x:0x%08x]\n",
- (u64)p_pkt->rx_buf_addr, pad, packet_length,
- le16_to_cpu(p_cqe->parse_flags.flags), vlan,
- opaque_data_0, opaque_data_1);
+ (u64)data->rx_buf_addr,
+ data->u.placement_offset,
+ data->length.packet_length,
+ data->parse_flags,
+ data->vlan, data->opaque_data_0, data->opaque_data_1);
if ((cdev->dp_module & NETIF_MSG_PKTDATA) && buffer->data) {
print_hex_dump(KERN_INFO, "",
DUMP_PREFIX_OFFSET, 16, 1,
- buffer->data, packet_length, false);
+ buffer->data, data->length.packet_length, false);
}
/* Determine if data is valid */
- if (packet_length < ETH_HLEN)
+ if (data->length.packet_length < ETH_HLEN)
reuse = true;
/* Allocate a replacement for buffer; Reuse upon failure */
@@ -220,9 +205,9 @@ static void qed_ll2b_complete_rx_packet(struct qed_hwfn *p_hwfn,
goto out_post;
}
- pad += NET_SKB_PAD;
- skb_reserve(skb, pad);
- skb_put(skb, packet_length);
+ data->u.placement_offset += NET_SKB_PAD;
+ skb_reserve(skb, data->u.placement_offset);
+ skb_put(skb, data->length.packet_length);
skb_checksum_none_assert(skb);
/* Get partial ethernet information instead of eth_type_trans(),
@@ -233,10 +218,12 @@ static void qed_ll2b_complete_rx_packet(struct qed_hwfn *p_hwfn,
/* Pass SKB onward */
if (cdev->ll2->cbs && cdev->ll2->cbs->rx_cb) {
- if (vlan)
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan);
+ if (data->vlan)
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ data->vlan);
cdev->ll2->cbs->rx_cb(cdev->ll2->cb_cookie, skb,
- opaque_data_0, opaque_data_1);
+ data->opaque_data_0,
+ data->opaque_data_1);
}
/* Update Buffer information and update FW producer */
@@ -322,7 +309,7 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
list_del(&p_pkt->list_entry);
b_last_packet = list_empty(&p_tx->active_descq);
list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
- if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO) {
+ if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) {
struct qed_ooo_buffer *p_buffer;
p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
@@ -334,21 +321,12 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
b_last_frag =
p_tx->cur_completing_bd_idx == p_pkt->bd_used;
tx_frag = p_pkt->bds_set[0].tx_frag;
- if (p_ll2_conn->conn.gsi_enable)
- qed_ll2b_release_tx_gsi_packet(p_hwfn,
- p_ll2_conn->
- my_id,
- p_pkt->cookie,
- tx_frag,
- b_last_frag,
- b_last_packet);
- else
- qed_ll2b_complete_tx_packet(p_hwfn,
- p_ll2_conn->my_id,
- p_pkt->cookie,
- tx_frag,
- b_last_frag,
- b_last_packet);
+ p_ll2_conn->cbs.tx_release_cb(p_ll2_conn->cbs.cookie,
+ p_ll2_conn->my_id,
+ p_pkt->cookie,
+ tx_frag,
+ b_last_frag,
+ b_last_packet);
}
}
}
@@ -361,7 +339,6 @@ static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
struct qed_ll2_tx_packet *p_pkt;
bool b_last_frag = false;
unsigned long flags;
- dma_addr_t tx_frag;
int rc = -EINVAL;
spin_lock_irqsave(&p_tx->lock, flags);
@@ -402,19 +379,13 @@ static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
spin_unlock_irqrestore(&p_tx->lock, flags);
- tx_frag = p_pkt->bds_set[0].tx_frag;
- if (p_ll2_conn->conn.gsi_enable)
- qed_ll2b_complete_tx_gsi_packet(p_hwfn,
- p_ll2_conn->my_id,
- p_pkt->cookie,
- tx_frag,
- b_last_frag, !num_bds);
- else
- qed_ll2b_complete_tx_packet(p_hwfn,
- p_ll2_conn->my_id,
- p_pkt->cookie,
- tx_frag,
- b_last_frag, !num_bds);
+
+ p_ll2_conn->cbs.tx_comp_cb(p_ll2_conn->cbs.cookie,
+ p_ll2_conn->my_id,
+ p_pkt->cookie,
+ p_pkt->bds_set[0].tx_frag,
+ b_last_frag, !num_bds);
+
spin_lock_irqsave(&p_tx->lock, flags);
}
@@ -425,81 +396,71 @@ out:
return rc;
}
-static int
-qed_ll2_rxq_completion_gsi(struct qed_hwfn *p_hwfn,
- struct qed_ll2_info *p_ll2_info,
- union core_rx_cqe_union *p_cqe,
- unsigned long lock_flags, bool b_last_cqe)
+static void qed_ll2_rxq_parse_gsi(struct qed_hwfn *p_hwfn,
+ union core_rx_cqe_union *p_cqe,
+ struct qed_ll2_comp_rx_data *data)
{
- struct qed_ll2_rx_queue *p_rx = &p_ll2_info->rx_queue;
- struct qed_ll2_rx_packet *p_pkt = NULL;
- u16 packet_length, parse_flags, vlan;
- u32 src_mac_addrhi;
- u16 src_mac_addrlo;
-
- if (!list_empty(&p_rx->active_descq))
- p_pkt = list_first_entry(&p_rx->active_descq,
- struct qed_ll2_rx_packet, list_entry);
- if (!p_pkt) {
- DP_NOTICE(p_hwfn,
- "GSI Rx completion but active_descq is empty\n");
- return -EIO;
- }
-
- list_del(&p_pkt->list_entry);
- parse_flags = le16_to_cpu(p_cqe->rx_cqe_gsi.parse_flags.flags);
- packet_length = le16_to_cpu(p_cqe->rx_cqe_gsi.data_length);
- vlan = le16_to_cpu(p_cqe->rx_cqe_gsi.vlan);
- src_mac_addrhi = le32_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrhi);
- src_mac_addrlo = le16_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrlo);
- if (qed_chain_consume(&p_rx->rxq_chain) != p_pkt->rxq_bd)
- DP_NOTICE(p_hwfn,
- "Mismatch between active_descq and the LL2 Rx chain\n");
- list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);
-
- spin_unlock_irqrestore(&p_rx->lock, lock_flags);
- qed_ll2b_complete_rx_gsi_packet(p_hwfn,
- p_ll2_info->my_id,
- p_pkt->cookie,
- p_pkt->rx_buf_addr,
- packet_length,
- p_cqe->rx_cqe_gsi.data_length_error,
- parse_flags,
- vlan,
- src_mac_addrhi,
- src_mac_addrlo, b_last_cqe);
- spin_lock_irqsave(&p_rx->lock, lock_flags);
+ data->parse_flags = le16_to_cpu(p_cqe->rx_cqe_gsi.parse_flags.flags);
+ data->length.data_length = le16_to_cpu(p_cqe->rx_cqe_gsi.data_length);
+ data->vlan = le16_to_cpu(p_cqe->rx_cqe_gsi.vlan);
+ data->opaque_data_0 = le32_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrhi);
+ data->opaque_data_1 = le16_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrlo);
+ data->u.data_length_error = p_cqe->rx_cqe_gsi.data_length_error;
+}
- return 0;
+static void qed_ll2_rxq_parse_reg(struct qed_hwfn *p_hwfn,
+ union core_rx_cqe_union *p_cqe,
+ struct qed_ll2_comp_rx_data *data)
+{
+ data->parse_flags = le16_to_cpu(p_cqe->rx_cqe_fp.parse_flags.flags);
+ data->length.packet_length =
+ le16_to_cpu(p_cqe->rx_cqe_fp.packet_length);
+ data->vlan = le16_to_cpu(p_cqe->rx_cqe_fp.vlan);
+ data->opaque_data_0 = le32_to_cpu(p_cqe->rx_cqe_fp.opaque_data.data[0]);
+ data->opaque_data_1 = le32_to_cpu(p_cqe->rx_cqe_fp.opaque_data.data[1]);
+ data->u.placement_offset = p_cqe->rx_cqe_fp.placement_offset;
}
-static int qed_ll2_rxq_completion_reg(struct qed_hwfn *p_hwfn,
- struct qed_ll2_info *p_ll2_conn,
- union core_rx_cqe_union *p_cqe,
- unsigned long *p_lock_flags,
- bool b_last_cqe)
+static int
+qed_ll2_rxq_handle_completion(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_ll2_conn,
+ union core_rx_cqe_union *p_cqe,
+ unsigned long *p_lock_flags, bool b_last_cqe)
{
struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
struct qed_ll2_rx_packet *p_pkt = NULL;
+ struct qed_ll2_comp_rx_data data;
if (!list_empty(&p_rx->active_descq))
p_pkt = list_first_entry(&p_rx->active_descq,
struct qed_ll2_rx_packet, list_entry);
if (!p_pkt) {
DP_NOTICE(p_hwfn,
- "LL2 Rx completion but active_descq is empty\n");
+ "[%d] LL2 Rx completion but active_descq is empty\n",
+ p_ll2_conn->input.conn_type);
+
return -EIO;
}
list_del(&p_pkt->list_entry);
+ if (p_cqe->rx_cqe_sp.type == CORE_RX_CQE_TYPE_REGULAR)
+ qed_ll2_rxq_parse_reg(p_hwfn, p_cqe, &data);
+ else
+ qed_ll2_rxq_parse_gsi(p_hwfn, p_cqe, &data);
if (qed_chain_consume(&p_rx->rxq_chain) != p_pkt->rxq_bd)
DP_NOTICE(p_hwfn,
"Mismatch between active_descq and the LL2 Rx chain\n");
+
list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);
+ data.connection_handle = p_ll2_conn->my_id;
+ data.cookie = p_pkt->cookie;
+ data.rx_buf_addr = p_pkt->rx_buf_addr;
+ data.b_last_packet = b_last_cqe;
+
spin_unlock_irqrestore(&p_rx->lock, *p_lock_flags);
- qed_ll2b_complete_rx_packet(p_hwfn, p_ll2_conn->my_id,
- p_pkt, &p_cqe->rx_cqe_fp, b_last_cqe);
+ p_ll2_conn->cbs.rx_comp_cb(p_ll2_conn->cbs.cookie, &data);
+
spin_lock_irqsave(&p_rx->lock, *p_lock_flags);
return 0;
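
With the parse helpers above, every Rx completion reaches the protocol driver as one flattened structure. A minimal sketch of an rx_comp_cb consuming it, assuming only the qed_ll2_comp_rx_data fields populated in this hunk (my_ll2_dev and my_ll2_consume_buffer are hypothetical placeholders, not driver API):

static void my_ll2_rx_comp(void *cxt, struct qed_ll2_comp_rx_data *data)
{
	struct my_ll2_dev *dev = cxt;	/* the cbs.cookie given at acquire time */

	/* Regular CQEs fill length.packet_length; GSI CQEs fill
	 * length.data_length and u.data_length_error instead.
	 */
	pr_debug("LL2 rx [handle %u] len %u vlan %u last %d\n",
		 data->connection_handle, data->length.packet_length,
		 data->vlan, data->b_last_packet);

	/* data->cookie is whatever was passed to qed_ll2_post_rx_buffer() */
	my_ll2_consume_buffer(dev, data->cookie, data->rx_buf_addr);
}
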
@@ -507,7 +468,7 @@ static int qed_ll2_rxq_completion_reg(struct qed_hwfn *p_hwfn,
static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie)
{
- struct qed_ll2_info *p_ll2_conn = cookie;
+ struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)cookie;
struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
union core_rx_cqe_union *cqe = NULL;
u16 cq_new_idx = 0, cq_old_idx = 0;
@@ -521,7 +482,9 @@ static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie)
while (cq_new_idx != cq_old_idx) {
bool b_last_cqe = (cq_new_idx == cq_old_idx);
- cqe = qed_chain_consume(&p_rx->rcq_chain);
+ cqe = (union core_rx_cqe_union *)
+ qed_chain_consume(&p_rx->rcq_chain);
cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
DP_VERBOSE(p_hwfn,
@@ -535,13 +498,10 @@ static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie)
rc = -EINVAL;
break;
case CORE_RX_CQE_TYPE_GSI_OFFLOAD:
- rc = qed_ll2_rxq_completion_gsi(p_hwfn, p_ll2_conn,
- cqe, flags, b_last_cqe);
- break;
case CORE_RX_CQE_TYPE_REGULAR:
- rc = qed_ll2_rxq_completion_reg(p_hwfn, p_ll2_conn,
- cqe, &flags,
- b_last_cqe);
+ rc = qed_ll2_rxq_handle_completion(p_hwfn, p_ll2_conn,
+ cqe, &flags,
+ b_last_cqe);
break;
default:
rc = -EIO;
@@ -565,10 +525,6 @@ static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
p_rx = &p_ll2_conn->rx_queue;
while (!list_empty(&p_rx->active_descq)) {
- dma_addr_t rx_buf_addr;
- void *cookie;
- bool b_last;
-
p_pkt = list_first_entry(&p_rx->active_descq,
struct qed_ll2_rx_packet, list_entry);
if (!p_pkt)
@@ -576,22 +532,26 @@ static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
list_move_tail(&p_pkt->list_entry, &p_rx->free_descq);
- if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO) {
+ if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) {
struct qed_ooo_buffer *p_buffer;
p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
p_buffer);
} else {
- rx_buf_addr = p_pkt->rx_buf_addr;
- cookie = p_pkt->cookie;
+ dma_addr_t rx_buf_addr = p_pkt->rx_buf_addr;
+ void *cookie = p_pkt->cookie;
+ bool b_last;
b_last = list_empty(&p_rx->active_descq);
+ p_ll2_conn->cbs.rx_release_cb(p_ll2_conn->cbs.cookie,
+ p_ll2_conn->my_id,
+ cookie,
+ rx_buf_addr, b_last);
}
}
}
-#if IS_ENABLED(CONFIG_QED_ISCSI)
static u8 qed_ll2_convert_rx_parse_to_tx_flags(u16 parse_flags)
{
u8 bd_flags = 0;
@@ -741,12 +701,13 @@ static void
qed_ooo_submit_tx_buffers(struct qed_hwfn *p_hwfn,
struct qed_ll2_info *p_ll2_conn)
{
+ struct qed_ll2_tx_pkt_info tx_pkt;
struct qed_ooo_buffer *p_buffer;
- int rc;
u16 l4_hdr_offset_w;
dma_addr_t first_frag;
u16 parse_flags;
u8 bd_flags;
+ int rc;
/* Submit Tx buffers here */
while ((p_buffer = qed_ooo_get_ready_buffer(p_hwfn,
@@ -761,13 +722,18 @@ qed_ooo_submit_tx_buffers(struct qed_hwfn *p_hwfn,
SET_FIELD(bd_flags, CORE_TX_BD_DATA_FORCE_VLAN_MODE, 1);
SET_FIELD(bd_flags, CORE_TX_BD_DATA_L4_PROTOCOL, 1);
- rc = qed_ll2_prepare_tx_packet(p_hwfn, p_ll2_conn->my_id, 1,
- p_buffer->vlan, bd_flags,
- l4_hdr_offset_w,
- p_ll2_conn->conn.tx_dest, 0,
- first_frag,
- p_buffer->packet_length,
- p_buffer, true);
+ memset(&tx_pkt, 0, sizeof(tx_pkt));
+ tx_pkt.num_of_bds = 1;
+ tx_pkt.vlan = p_buffer->vlan;
+ tx_pkt.bd_flags = bd_flags;
+ tx_pkt.l4_hdr_offset_w = l4_hdr_offset_w;
+ tx_pkt.tx_dest = p_ll2_conn->tx_dest;
+ tx_pkt.first_frag = first_frag;
+ tx_pkt.first_frag_len = p_buffer->packet_length;
+ tx_pkt.cookie = p_buffer;
+
+ rc = qed_ll2_prepare_tx_packet(p_hwfn, p_ll2_conn->my_id,
+ &tx_pkt, true);
if (rc) {
qed_ooo_put_ready_buffer(p_hwfn, p_hwfn->p_ooo_info,
p_buffer, false);
@@ -874,85 +840,6 @@ static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
return 0;
}
-static int
-qed_ll2_acquire_connection_ooo(struct qed_hwfn *p_hwfn,
- struct qed_ll2_info *p_ll2_info,
- u16 rx_num_ooo_buffers, u16 mtu)
-{
- struct qed_ooo_buffer *p_buf = NULL;
- void *p_virt;
- u16 buf_idx;
- int rc = 0;
-
- if (p_ll2_info->conn.conn_type != QED_LL2_TYPE_ISCSI_OOO)
- return rc;
-
- if (!rx_num_ooo_buffers)
- return -EINVAL;
-
- for (buf_idx = 0; buf_idx < rx_num_ooo_buffers; buf_idx++) {
- p_buf = kzalloc(sizeof(*p_buf), GFP_KERNEL);
- if (!p_buf) {
- rc = -ENOMEM;
- goto out;
- }
-
- p_buf->rx_buffer_size = mtu + 26 + ETH_CACHE_LINE_SIZE;
- p_buf->rx_buffer_size = (p_buf->rx_buffer_size +
- ETH_CACHE_LINE_SIZE - 1) &
- ~(ETH_CACHE_LINE_SIZE - 1);
- p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
- p_buf->rx_buffer_size,
- &p_buf->rx_buffer_phys_addr,
- GFP_KERNEL);
- if (!p_virt) {
- kfree(p_buf);
- rc = -ENOMEM;
- goto out;
- }
-
- p_buf->rx_buffer_virt_addr = p_virt;
- qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info, p_buf);
- }
-
- DP_VERBOSE(p_hwfn, QED_MSG_LL2,
- "Allocated [%04x] LL2 OOO buffers [each of size 0x%08x]\n",
- rx_num_ooo_buffers, p_buf->rx_buffer_size);
-
-out:
- return rc;
-}
-
-static void
-qed_ll2_establish_connection_ooo(struct qed_hwfn *p_hwfn,
- struct qed_ll2_info *p_ll2_conn)
-{
- if (p_ll2_conn->conn.conn_type != QED_LL2_TYPE_ISCSI_OOO)
- return;
-
- qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
- qed_ooo_submit_rx_buffers(p_hwfn, p_ll2_conn);
-}
-
-static void qed_ll2_release_connection_ooo(struct qed_hwfn *p_hwfn,
- struct qed_ll2_info *p_ll2_conn)
-{
- struct qed_ooo_buffer *p_buffer;
-
- if (p_ll2_conn->conn.conn_type != QED_LL2_TYPE_ISCSI_OOO)
- return;
-
- qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
- while ((p_buffer = qed_ooo_get_free_buffer(p_hwfn,
- p_hwfn->p_ooo_info))) {
- dma_free_coherent(&p_hwfn->cdev->pdev->dev,
- p_buffer->rx_buffer_size,
- p_buffer->rx_buffer_virt_addr,
- p_buffer->rx_buffer_phys_addr);
- kfree(p_buffer);
- }
-}
-
static void qed_ll2_stop_ooo(struct qed_dev *cdev)
{
struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
@@ -966,69 +853,11 @@ static void qed_ll2_stop_ooo(struct qed_dev *cdev)
*handle = QED_LL2_UNUSED_HANDLE;
}
-static int qed_ll2_start_ooo(struct qed_dev *cdev,
- struct qed_ll2_params *params)
-{
- struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
- u8 *handle = &hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id;
- struct qed_ll2_conn ll2_info = { 0 };
- int rc;
-
- ll2_info.conn_type = QED_LL2_TYPE_ISCSI_OOO;
- ll2_info.mtu = params->mtu;
- ll2_info.rx_drop_ttl0_flg = params->drop_ttl0_packets;
- ll2_info.rx_vlan_removal_en = params->rx_vlan_stripping;
- ll2_info.tx_tc = OOO_LB_TC;
- ll2_info.tx_dest = CORE_TX_DEST_LB;
-
- rc = qed_ll2_acquire_connection(hwfn, &ll2_info,
- QED_LL2_RX_SIZE, QED_LL2_TX_SIZE,
- handle);
- if (rc) {
- DP_INFO(cdev, "Failed to acquire LL2 OOO connection\n");
- goto out;
- }
-
- rc = qed_ll2_establish_connection(hwfn, *handle);
- if (rc) {
- DP_INFO(cdev, "Failed to establish LL2 OOO connection\n");
- goto fail;
- }
-
- return 0;
-
-fail:
- qed_ll2_release_connection(hwfn, *handle);
-out:
- *handle = QED_LL2_UNUSED_HANDLE;
- return rc;
-}
-#else /* IS_ENABLED(CONFIG_QED_ISCSI) */
-static int qed_ll2_lb_rxq_completion(struct qed_hwfn *p_hwfn,
- void *p_cookie) { return -EINVAL; }
-static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn,
- void *p_cookie) { return -EINVAL; }
-static inline int
-qed_ll2_acquire_connection_ooo(struct qed_hwfn *p_hwfn,
- struct qed_ll2_info *p_ll2_info,
- u16 rx_num_ooo_buffers, u16 mtu) { return 0; }
-static inline void
-qed_ll2_establish_connection_ooo(struct qed_hwfn *p_hwfn,
- struct qed_ll2_info *p_ll2_conn) { return; }
-static inline void
-qed_ll2_release_connection_ooo(struct qed_hwfn *p_hwfn,
- struct qed_ll2_info *p_ll2_conn) { return; }
-static inline void qed_ll2_stop_ooo(struct qed_dev *cdev) { return; }
-static inline int qed_ll2_start_ooo(struct qed_dev *cdev,
- struct qed_ll2_params *params)
- { return -EINVAL; }
-#endif /* IS_ENABLED(CONFIG_QED_ISCSI) */
-
static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
struct qed_ll2_info *p_ll2_conn,
u8 action_on_error)
{
- enum qed_ll2_conn_type conn_type = p_ll2_conn->conn.conn_type;
+ enum qed_ll2_conn_type conn_type = p_ll2_conn->input.conn_type;
struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
struct core_rx_start_ramrod_data *p_ramrod = NULL;
struct qed_spq_entry *p_ent = NULL;
@@ -1054,22 +883,21 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
p_ramrod->sb_index = p_rx->rx_sb_index;
p_ramrod->complete_event_flg = 1;
- p_ramrod->mtu = cpu_to_le16(p_ll2_conn->conn.mtu);
- DMA_REGPAIR_LE(p_ramrod->bd_base,
- p_rx->rxq_chain.p_phys_addr);
+ p_ramrod->mtu = cpu_to_le16(p_ll2_conn->input.mtu);
+ DMA_REGPAIR_LE(p_ramrod->bd_base, p_rx->rxq_chain.p_phys_addr);
cqe_pbl_size = (u16)qed_chain_get_page_cnt(&p_rx->rcq_chain);
p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size);
DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr,
qed_chain_get_pbl_phys(&p_rx->rcq_chain));
- p_ramrod->drop_ttl0_flg = p_ll2_conn->conn.rx_drop_ttl0_flg;
- p_ramrod->inner_vlan_removal_en = p_ll2_conn->conn.rx_vlan_removal_en;
+ p_ramrod->drop_ttl0_flg = p_ll2_conn->input.rx_drop_ttl0_flg;
+ p_ramrod->inner_vlan_removal_en = p_ll2_conn->input.rx_vlan_removal_en;
p_ramrod->queue_id = p_ll2_conn->queue_id;
- p_ramrod->main_func_queue = (conn_type == QED_LL2_TYPE_ISCSI_OOO) ? 0
- : 1;
+ p_ramrod->main_func_queue = (conn_type == QED_LL2_TYPE_OOO) ? 0 : 1;
if ((IS_MF_DEFAULT(p_hwfn) || IS_MF_SI(p_hwfn)) &&
- p_ramrod->main_func_queue && (conn_type != QED_LL2_TYPE_ROCE)) {
+ p_ramrod->main_func_queue && (conn_type != QED_LL2_TYPE_ROCE) &&
+ (conn_type != QED_LL2_TYPE_IWARP)) {
p_ramrod->mf_si_bcast_accept_all = 1;
p_ramrod->mf_si_mcast_accept_all = 1;
} else {
@@ -1078,14 +906,14 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
}
p_ramrod->action_on_error.error_type = action_on_error;
- p_ramrod->gsi_offload_flag = p_ll2_conn->conn.gsi_enable;
+ p_ramrod->gsi_offload_flag = p_ll2_conn->input.gsi_enable;
return qed_spq_post(p_hwfn, p_ent, NULL);
}
static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
struct qed_ll2_info *p_ll2_conn)
{
- enum qed_ll2_conn_type conn_type = p_ll2_conn->conn.conn_type;
+ enum qed_ll2_conn_type conn_type = p_ll2_conn->input.conn_type;
struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
struct core_tx_start_ramrod_data *p_ramrod = NULL;
struct qed_spq_entry *p_ent = NULL;
@@ -1096,7 +924,7 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
if (!QED_LL2_TX_REGISTERED(p_ll2_conn))
return 0;
- if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO)
+ if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO)
p_ll2_conn->tx_stats_en = 0;
else
p_ll2_conn->tx_stats_en = 1;
@@ -1117,7 +945,7 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn));
p_ramrod->sb_index = p_tx->tx_sb_index;
- p_ramrod->mtu = cpu_to_le16(p_ll2_conn->conn.mtu);
+ p_ramrod->mtu = cpu_to_le16(p_ll2_conn->input.mtu);
p_ramrod->stats_en = p_ll2_conn->tx_stats_en;
p_ramrod->stats_id = p_ll2_conn->tx_stats_id;
@@ -1126,11 +954,11 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
pbl_size = qed_chain_get_page_cnt(&p_tx->txq_chain);
p_ramrod->pbl_size = cpu_to_le16(pbl_size);
- switch (p_ll2_conn->conn.tx_tc) {
- case LB_TC:
+ switch (p_ll2_conn->input.tx_tc) {
+ case PURE_LB_TC:
pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
break;
- case OOO_LB_TC:
+ case PKT_LB_TC:
pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OOO);
break;
default:
@@ -1145,18 +973,27 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
p_ramrod->conn_type = PROTOCOLID_FCOE;
break;
case QED_LL2_TYPE_ISCSI:
- case QED_LL2_TYPE_ISCSI_OOO:
p_ramrod->conn_type = PROTOCOLID_ISCSI;
break;
case QED_LL2_TYPE_ROCE:
p_ramrod->conn_type = PROTOCOLID_ROCE;
break;
+ case QED_LL2_TYPE_IWARP:
+ p_ramrod->conn_type = PROTOCOLID_IWARP;
+ break;
+ case QED_LL2_TYPE_OOO:
+ if (p_hwfn->hw_info.personality == QED_PCI_ISCSI)
+ p_ramrod->conn_type = PROTOCOLID_ISCSI;
+ else
+ p_ramrod->conn_type = PROTOCOLID_IWARP;
+ break;
default:
p_ramrod->conn_type = PROTOCOLID_ETH;
DP_NOTICE(p_hwfn, "Unknown connection type: %d\n", conn_type);
}
- p_ramrod->gsi_offload_flag = p_ll2_conn->conn.gsi_enable;
+ p_ramrod->gsi_offload_flag = p_ll2_conn->input.gsi_enable;
+
return qed_spq_post(p_hwfn, p_ent, NULL);
}
@@ -1212,22 +1049,22 @@ static int qed_sp_ll2_tx_queue_stop(struct qed_hwfn *p_hwfn,
static int
qed_ll2_acquire_connection_rx(struct qed_hwfn *p_hwfn,
- struct qed_ll2_info *p_ll2_info, u16 rx_num_desc)
+ struct qed_ll2_info *p_ll2_info)
{
struct qed_ll2_rx_packet *p_descq;
u32 capacity;
int rc = 0;
- if (!rx_num_desc)
+ if (!p_ll2_info->input.rx_num_desc)
goto out;
rc = qed_chain_alloc(p_hwfn->cdev,
QED_CHAIN_USE_TO_CONSUME_PRODUCE,
QED_CHAIN_MODE_NEXT_PTR,
QED_CHAIN_CNT_TYPE_U16,
- rx_num_desc,
+ p_ll2_info->input.rx_num_desc,
sizeof(struct core_rx_bd),
- &p_ll2_info->rx_queue.rxq_chain);
+ &p_ll2_info->rx_queue.rxq_chain, NULL);
if (rc) {
DP_NOTICE(p_hwfn, "Failed to allocate ll2 rxq chain\n");
goto out;
@@ -1247,9 +1084,9 @@ qed_ll2_acquire_connection_rx(struct qed_hwfn *p_hwfn,
QED_CHAIN_USE_TO_CONSUME_PRODUCE,
QED_CHAIN_MODE_PBL,
QED_CHAIN_CNT_TYPE_U16,
- rx_num_desc,
+ p_ll2_info->input.rx_num_desc,
sizeof(struct core_rx_fast_path_cqe),
- &p_ll2_info->rx_queue.rcq_chain);
+ &p_ll2_info->rx_queue.rcq_chain, NULL);
if (rc) {
DP_NOTICE(p_hwfn, "Failed to allocate ll2 rcq chain\n");
goto out;
@@ -1257,30 +1094,29 @@ qed_ll2_acquire_connection_rx(struct qed_hwfn *p_hwfn,
DP_VERBOSE(p_hwfn, QED_MSG_LL2,
"Allocated LL2 Rxq [Type %08x] with 0x%08x buffers\n",
- p_ll2_info->conn.conn_type, rx_num_desc);
+ p_ll2_info->input.conn_type, p_ll2_info->input.rx_num_desc);
out:
return rc;
}
static int qed_ll2_acquire_connection_tx(struct qed_hwfn *p_hwfn,
- struct qed_ll2_info *p_ll2_info,
- u16 tx_num_desc)
+ struct qed_ll2_info *p_ll2_info)
{
struct qed_ll2_tx_packet *p_descq;
u32 capacity;
int rc = 0;
- if (!tx_num_desc)
+ if (!p_ll2_info->input.tx_num_desc)
goto out;
rc = qed_chain_alloc(p_hwfn->cdev,
QED_CHAIN_USE_TO_CONSUME_PRODUCE,
QED_CHAIN_MODE_PBL,
QED_CHAIN_CNT_TYPE_U16,
- tx_num_desc,
+ p_ll2_info->input.tx_num_desc,
sizeof(struct core_tx_bd),
- &p_ll2_info->tx_queue.txq_chain);
+ &p_ll2_info->tx_queue.txq_chain, NULL);
if (rc)
goto out;
@@ -1295,28 +1131,112 @@ static int qed_ll2_acquire_connection_tx(struct qed_hwfn *p_hwfn,
DP_VERBOSE(p_hwfn, QED_MSG_LL2,
"Allocated LL2 Txq [Type %08x] with 0x%08x buffers\n",
- p_ll2_info->conn.conn_type, tx_num_desc);
+ p_ll2_info->input.conn_type, p_ll2_info->input.tx_num_desc);
out:
if (rc)
DP_NOTICE(p_hwfn,
"Can't allocate memory for Tx LL2 with 0x%08x buffers\n",
- tx_num_desc);
+ p_ll2_info->input.tx_num_desc);
+ return rc;
+}
+
+static int
+qed_ll2_acquire_connection_ooo(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_ll2_info, u16 mtu)
+{
+ struct qed_ooo_buffer *p_buf = NULL;
+ void *p_virt;
+ u16 buf_idx;
+ int rc = 0;
+
+ if (p_ll2_info->input.conn_type != QED_LL2_TYPE_OOO)
+ return rc;
+
+ /* Correct number of requested OOO buffers if needed */
+ if (!p_ll2_info->input.rx_num_ooo_buffers) {
+ u16 num_desc = p_ll2_info->input.rx_num_desc;
+
+ if (!num_desc)
+ return -EINVAL;
+ p_ll2_info->input.rx_num_ooo_buffers = num_desc * 2;
+ }
+
+ for (buf_idx = 0; buf_idx < p_ll2_info->input.rx_num_ooo_buffers;
+ buf_idx++) {
+ p_buf = kzalloc(sizeof(*p_buf), GFP_KERNEL);
+ if (!p_buf) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ p_buf->rx_buffer_size = mtu + 26 + ETH_CACHE_LINE_SIZE;
+ p_buf->rx_buffer_size = (p_buf->rx_buffer_size +
+ ETH_CACHE_LINE_SIZE - 1) &
+ ~(ETH_CACHE_LINE_SIZE - 1);
+ p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ p_buf->rx_buffer_size,
+ &p_buf->rx_buffer_phys_addr,
+ GFP_KERNEL);
+ if (!p_virt) {
+ kfree(p_buf);
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ p_buf->rx_buffer_virt_addr = p_virt;
+ qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info, p_buf);
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_LL2,
+ "Allocated [%04x] LL2 OOO buffers [each of size 0x%08x]\n",
+ p_ll2_info->input.rx_num_ooo_buffers, p_buf->rx_buffer_size);
+
+out:
return rc;
}
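
The two-step round-up above is the usual power-of-two alignment idiom; an equivalent and arguably clearer spelling, assuming ETH_CACHE_LINE_SIZE is a power of two (which the open-coded mask already requires):

	p_buf->rx_buffer_size = ALIGN(mtu + 26 + ETH_CACHE_LINE_SIZE,
				      ETH_CACHE_LINE_SIZE);
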
-int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn,
- struct qed_ll2_conn *p_params,
- u16 rx_num_desc,
- u16 tx_num_desc,
- u8 *p_connection_handle)
+static int
+qed_ll2_set_cbs(struct qed_ll2_info *p_ll2_info, const struct qed_ll2_cbs *cbs)
{
+ if (!cbs || (!cbs->rx_comp_cb ||
+ !cbs->rx_release_cb ||
+ !cbs->tx_comp_cb || !cbs->tx_release_cb || !cbs->cookie))
+ return -EINVAL;
+
+ p_ll2_info->cbs.rx_comp_cb = cbs->rx_comp_cb;
+ p_ll2_info->cbs.rx_release_cb = cbs->rx_release_cb;
+ p_ll2_info->cbs.tx_comp_cb = cbs->tx_comp_cb;
+ p_ll2_info->cbs.tx_release_cb = cbs->tx_release_cb;
+ p_ll2_info->cbs.cookie = cbs->cookie;
+
+ return 0;
+}
+
+static enum core_error_handle
+qed_ll2_get_error_choice(enum qed_ll2_error_handle err)
+{
+ switch (err) {
+ case QED_LL2_DROP_PACKET:
+ return LL2_DROP_PACKET;
+ case QED_LL2_DO_NOTHING:
+ return LL2_DO_NOTHING;
+ case QED_LL2_ASSERT:
+ return LL2_ASSERT;
+ default:
+ return LL2_DO_NOTHING;
+ }
+}
+
+int qed_ll2_acquire_connection(void *cxt, struct qed_ll2_acquire_data *data)
+{
+ struct qed_hwfn *p_hwfn = cxt;
qed_int_comp_cb_t comp_rx_cb, comp_tx_cb;
struct qed_ll2_info *p_ll2_info = NULL;
+ u8 i, *p_tx_max;
int rc;
- u8 i;
- if (!p_connection_handle || !p_hwfn->p_ll2_info)
+ if (!data->p_connection_handle || !p_hwfn->p_ll2_info)
return -EINVAL;
/* Find a free connection to be used */
@@ -1335,23 +1255,40 @@ int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn,
if (!p_ll2_info)
return -EBUSY;
- p_ll2_info->conn = *p_params;
+ memcpy(&p_ll2_info->input, &data->input, sizeof(p_ll2_info->input));
+
+ p_ll2_info->tx_dest = (data->input.tx_dest == QED_LL2_TX_DEST_NW) ?
+ CORE_TX_DEST_NW : CORE_TX_DEST_LB;
- rc = qed_ll2_acquire_connection_rx(p_hwfn, p_ll2_info, rx_num_desc);
+ /* Correct maximum number of Tx BDs */
+ p_tx_max = &p_ll2_info->input.tx_max_bds_per_packet;
+ if (*p_tx_max == 0)
+ *p_tx_max = CORE_LL2_TX_MAX_BDS_PER_PACKET;
+ else
+ *p_tx_max = min_t(u8, *p_tx_max,
+ CORE_LL2_TX_MAX_BDS_PER_PACKET);
+
+ rc = qed_ll2_set_cbs(p_ll2_info, data->cbs);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "Invalid callback functions\n");
+ goto q_allocate_fail;
+ }
+
+ rc = qed_ll2_acquire_connection_rx(p_hwfn, p_ll2_info);
if (rc)
goto q_allocate_fail;
- rc = qed_ll2_acquire_connection_tx(p_hwfn, p_ll2_info, tx_num_desc);
+ rc = qed_ll2_acquire_connection_tx(p_hwfn, p_ll2_info);
if (rc)
goto q_allocate_fail;
rc = qed_ll2_acquire_connection_ooo(p_hwfn, p_ll2_info,
- rx_num_desc * 2, p_params->mtu);
+ data->input.mtu);
if (rc)
goto q_allocate_fail;
/* Register callbacks for the Rx/Tx queues */
- if (p_params->conn_type == QED_LL2_TYPE_ISCSI_OOO) {
+ if (data->input.conn_type == QED_LL2_TYPE_OOO) {
comp_rx_cb = qed_ll2_lb_rxq_completion;
comp_tx_cb = qed_ll2_lb_txq_completion;
} else {
@@ -1359,7 +1296,7 @@ int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn,
comp_tx_cb = qed_ll2_txq_completion;
}
- if (rx_num_desc) {
+ if (data->input.rx_num_desc) {
qed_int_register_cb(p_hwfn, comp_rx_cb,
&p_hwfn->p_ll2_info[i],
&p_ll2_info->rx_queue.rx_sb_index,
@@ -1367,7 +1304,7 @@ int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn,
p_ll2_info->rx_queue.b_cb_registred = true;
}
- if (tx_num_desc) {
+ if (data->input.tx_num_desc) {
qed_int_register_cb(p_hwfn,
comp_tx_cb,
&p_hwfn->p_ll2_info[i],
@@ -1376,7 +1313,7 @@ int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn,
p_ll2_info->tx_queue.b_cb_registred = true;
}
- *p_connection_handle = i;
+ *data->p_connection_handle = i;
return rc;
q_allocate_fail:
@@ -1387,24 +1324,39 @@ q_allocate_fail:
static int qed_ll2_establish_connection_rx(struct qed_hwfn *p_hwfn,
struct qed_ll2_info *p_ll2_conn)
{
+ enum qed_ll2_error_handle error_input;
+ enum core_error_handle error_mode;
u8 action_on_error = 0;
if (!QED_LL2_RX_REGISTERED(p_ll2_conn))
return 0;
DIRECT_REG_WR(p_ll2_conn->rx_queue.set_prod_addr, 0x0);
-
+ error_input = p_ll2_conn->input.ai_err_packet_too_big;
+ error_mode = qed_ll2_get_error_choice(error_input);
SET_FIELD(action_on_error,
- CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG,
- p_ll2_conn->conn.ai_err_packet_too_big);
- SET_FIELD(action_on_error,
- CORE_RX_ACTION_ON_ERROR_NO_BUFF, p_ll2_conn->conn.ai_err_no_buf);
+ CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG, error_mode);
+ error_input = p_ll2_conn->input.ai_err_no_buf;
+ error_mode = qed_ll2_get_error_choice(error_input);
+ SET_FIELD(action_on_error, CORE_RX_ACTION_ON_ERROR_NO_BUFF, error_mode);
return qed_sp_ll2_rx_queue_start(p_hwfn, p_ll2_conn, action_on_error);
}
-int qed_ll2_establish_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
+static void
+qed_ll2_establish_connection_ooo(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_ll2_conn)
{
+ if (p_ll2_conn->input.conn_type != QED_LL2_TYPE_OOO)
+ return;
+
+ qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
+ qed_ooo_submit_rx_buffers(p_hwfn, p_ll2_conn);
+}
+
+int qed_ll2_establish_connection(void *cxt, u8 connection_handle)
+{
+ struct qed_hwfn *p_hwfn = cxt;
struct qed_ll2_info *p_ll2_conn;
struct qed_ll2_rx_queue *p_rx;
struct qed_ll2_tx_queue *p_tx;
@@ -1477,12 +1429,12 @@ int qed_ll2_establish_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
if (rc)
goto out;
- if (p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE)
+ if (!QED_IS_RDMA_PERSONALITY(p_hwfn))
qed_wr(p_hwfn, p_ptt, PRS_REG_USE_LIGHT_L2, 1);
qed_ll2_establish_connection_ooo(p_hwfn, p_ll2_conn);
- if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_FCOE) {
+ if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_FCOE) {
qed_llh_add_protocol_filter(p_hwfn, p_ptt,
0x8906, 0,
QED_LLH_FILTER_ETHERTYPE);
@@ -1531,11 +1483,12 @@ static void qed_ll2_post_rx_buffer_notify_fw(struct qed_hwfn *p_hwfn,
DIRECT_REG_WR(p_rx->set_prod_addr, *((u32 *)&rx_prod));
}
-int qed_ll2_post_rx_buffer(struct qed_hwfn *p_hwfn,
+int qed_ll2_post_rx_buffer(void *cxt,
u8 connection_handle,
dma_addr_t addr,
u16 buf_len, void *cookie, u8 notify_fw)
{
+ struct qed_hwfn *p_hwfn = cxt;
struct core_rx_bd_with_buff_len *p_curb = NULL;
struct qed_ll2_rx_packet *p_curp = NULL;
struct qed_ll2_info *p_ll2_conn;
@@ -1594,20 +1547,18 @@ out:
static void qed_ll2_prepare_tx_packet_set(struct qed_hwfn *p_hwfn,
struct qed_ll2_tx_queue *p_tx,
struct qed_ll2_tx_packet *p_curp,
- u8 num_of_bds,
- dma_addr_t first_frag,
- u16 first_frag_len, void *p_cookie,
+ struct qed_ll2_tx_pkt_info *pkt,
u8 notify_fw)
{
list_del(&p_curp->list_entry);
- p_curp->cookie = p_cookie;
- p_curp->bd_used = num_of_bds;
+ p_curp->cookie = pkt->cookie;
+ p_curp->bd_used = pkt->num_of_bds;
p_curp->notify_fw = notify_fw;
p_tx->cur_send_packet = p_curp;
p_tx->cur_send_frag_num = 0;
- p_curp->bds_set[p_tx->cur_send_frag_num].tx_frag = first_frag;
- p_curp->bds_set[p_tx->cur_send_frag_num].frag_len = first_frag_len;
+ p_curp->bds_set[p_tx->cur_send_frag_num].tx_frag = pkt->first_frag;
+ p_curp->bds_set[p_tx->cur_send_frag_num].frag_len = pkt->first_frag_len;
p_tx->cur_send_frag_num++;
}
@@ -1615,51 +1566,52 @@ static void
qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
struct qed_ll2_info *p_ll2,
struct qed_ll2_tx_packet *p_curp,
- u8 num_of_bds,
- enum core_tx_dest tx_dest,
- u16 vlan,
- u8 bd_flags,
- u16 l4_hdr_offset_w,
- enum core_roce_flavor_type roce_flavor,
- dma_addr_t first_frag,
- u16 first_frag_len)
+ struct qed_ll2_tx_pkt_info *pkt)
{
struct qed_chain *p_tx_chain = &p_ll2->tx_queue.txq_chain;
u16 prod_idx = qed_chain_get_prod_idx(p_tx_chain);
struct core_tx_bd *start_bd = NULL;
+ enum core_roce_flavor_type roce_flavor;
+ enum core_tx_dest tx_dest;
u16 bd_data = 0, frag_idx;
+ roce_flavor = (pkt->qed_roce_flavor == QED_LL2_ROCE) ? CORE_ROCE
+ : CORE_RROCE;
+
+ tx_dest = (pkt->tx_dest == QED_LL2_TX_DEST_NW) ? CORE_TX_DEST_NW
+ : CORE_TX_DEST_LB;
+
start_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
- start_bd->nw_vlan_or_lb_echo = cpu_to_le16(vlan);
+ start_bd->nw_vlan_or_lb_echo = cpu_to_le16(pkt->vlan);
SET_FIELD(start_bd->bitfield1, CORE_TX_BD_L4_HDR_OFFSET_W,
- cpu_to_le16(l4_hdr_offset_w));
+ cpu_to_le16(pkt->l4_hdr_offset_w));
SET_FIELD(start_bd->bitfield1, CORE_TX_BD_TX_DST, tx_dest);
- bd_data |= bd_flags;
+ bd_data |= pkt->bd_flags;
SET_FIELD(bd_data, CORE_TX_BD_DATA_START_BD, 0x1);
- SET_FIELD(bd_data, CORE_TX_BD_DATA_NBDS, num_of_bds);
+ SET_FIELD(bd_data, CORE_TX_BD_DATA_NBDS, pkt->num_of_bds);
SET_FIELD(bd_data, CORE_TX_BD_DATA_ROCE_FLAV, roce_flavor);
start_bd->bd_data.as_bitfield = cpu_to_le16(bd_data);
- DMA_REGPAIR_LE(start_bd->addr, first_frag);
- start_bd->nbytes = cpu_to_le16(first_frag_len);
+ DMA_REGPAIR_LE(start_bd->addr, pkt->first_frag);
+ start_bd->nbytes = cpu_to_le16(pkt->first_frag_len);
DP_VERBOSE(p_hwfn,
(NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
"LL2 [q 0x%02x cid 0x%08x type 0x%08x] Tx Producer at [0x%04x] - set with a %04x bytes %02x BDs buffer at %08x:%08x\n",
p_ll2->queue_id,
p_ll2->cid,
- p_ll2->conn.conn_type,
+ p_ll2->input.conn_type,
prod_idx,
- first_frag_len,
- num_of_bds,
+ pkt->first_frag_len,
+ pkt->num_of_bds,
le32_to_cpu(start_bd->addr.hi),
le32_to_cpu(start_bd->addr.lo));
- if (p_ll2->tx_queue.cur_send_frag_num == num_of_bds)
+ if (p_ll2->tx_queue.cur_send_frag_num == pkt->num_of_bds)
return;
/* Need to provide the packet with additional BDs for frags */
for (frag_idx = p_ll2->tx_queue.cur_send_frag_num;
- frag_idx < num_of_bds; frag_idx++) {
+ frag_idx < pkt->num_of_bds; frag_idx++) {
struct core_tx_bd **p_bd = &p_curp->bds_set[frag_idx].txq_bd;
*p_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
@@ -1722,26 +1674,20 @@ static void qed_ll2_tx_packet_notify(struct qed_hwfn *p_hwfn,
(NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
"LL2 [q 0x%02x cid 0x%08x type 0x%08x] Doorbelled [producer 0x%04x]\n",
p_ll2_conn->queue_id,
- p_ll2_conn->cid, p_ll2_conn->conn.conn_type, db_msg.spq_prod);
+ p_ll2_conn->cid,
+ p_ll2_conn->input.conn_type, db_msg.spq_prod);
}
-int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn,
+int qed_ll2_prepare_tx_packet(void *cxt,
u8 connection_handle,
- u8 num_of_bds,
- u16 vlan,
- u8 bd_flags,
- u16 l4_hdr_offset_w,
- enum qed_ll2_tx_dest e_tx_dest,
- enum qed_ll2_roce_flavor_type qed_roce_flavor,
- dma_addr_t first_frag,
- u16 first_frag_len, void *cookie, u8 notify_fw)
+ struct qed_ll2_tx_pkt_info *pkt,
+ bool notify_fw)
{
+ struct qed_hwfn *p_hwfn = cxt;
struct qed_ll2_tx_packet *p_curp = NULL;
struct qed_ll2_info *p_ll2_conn = NULL;
- enum core_roce_flavor_type roce_flavor;
struct qed_ll2_tx_queue *p_tx;
struct qed_chain *p_tx_chain;
- enum core_tx_dest tx_dest;
unsigned long flags;
int rc = 0;
@@ -1751,7 +1697,7 @@ int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn,
p_tx = &p_ll2_conn->tx_queue;
p_tx_chain = &p_tx->txq_chain;
- if (num_of_bds > CORE_LL2_TX_MAX_BDS_PER_PACKET)
+ if (pkt->num_of_bds > CORE_LL2_TX_MAX_BDS_PER_PACKET)
return -EIO;
spin_lock_irqsave(&p_tx->lock, flags);
@@ -1764,7 +1710,7 @@ int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn,
if (!list_empty(&p_tx->free_descq))
p_curp = list_first_entry(&p_tx->free_descq,
struct qed_ll2_tx_packet, list_entry);
- if (p_curp && qed_chain_get_elem_left(p_tx_chain) < num_of_bds)
+ if (p_curp && qed_chain_get_elem_left(p_tx_chain) < pkt->num_of_bds)
p_curp = NULL;
if (!p_curp) {
@@ -1772,26 +1718,10 @@ int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn,
goto out;
}
- tx_dest = e_tx_dest == QED_LL2_TX_DEST_NW ? CORE_TX_DEST_NW :
- CORE_TX_DEST_LB;
- if (qed_roce_flavor == QED_LL2_ROCE) {
- roce_flavor = CORE_ROCE;
- } else if (qed_roce_flavor == QED_LL2_RROCE) {
- roce_flavor = CORE_RROCE;
- } else {
- rc = -EINVAL;
- goto out;
- }
-
/* Prepare packet and BD, and perhaps send a doorbell to FW */
- qed_ll2_prepare_tx_packet_set(p_hwfn, p_tx, p_curp,
- num_of_bds, first_frag,
- first_frag_len, cookie, notify_fw);
- qed_ll2_prepare_tx_packet_set_bd(p_hwfn, p_ll2_conn, p_curp,
- num_of_bds, tx_dest,
- vlan, bd_flags, l4_hdr_offset_w,
- roce_flavor,
- first_frag, first_frag_len);
+ qed_ll2_prepare_tx_packet_set(p_hwfn, p_tx, p_curp, pkt, notify_fw);
+
+ qed_ll2_prepare_tx_packet_set_bd(p_hwfn, p_ll2_conn, p_curp, pkt);
qed_ll2_tx_packet_notify(p_hwfn, p_ll2_conn);
@@ -1800,11 +1730,12 @@ out:
return rc;
}
-int qed_ll2_set_fragment_of_tx_packet(struct qed_hwfn *p_hwfn,
+int qed_ll2_set_fragment_of_tx_packet(void *cxt,
u8 connection_handle,
dma_addr_t addr, u16 nbytes)
{
struct qed_ll2_tx_packet *p_cur_send_packet = NULL;
+ struct qed_hwfn *p_hwfn = cxt;
struct qed_ll2_info *p_ll2_conn = NULL;
u16 cur_send_frag_num = 0;
struct core_tx_bd *p_bd;
@@ -1839,8 +1770,9 @@ int qed_ll2_set_fragment_of_tx_packet(struct qed_hwfn *p_hwfn,
return 0;
}
-int qed_ll2_terminate_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
+int qed_ll2_terminate_connection(void *cxt, u8 connection_handle)
{
+ struct qed_hwfn *p_hwfn = cxt;
struct qed_ll2_info *p_ll2_conn = NULL;
int rc = -EINVAL;
struct qed_ptt *p_ptt;
@@ -1870,10 +1802,10 @@ int qed_ll2_terminate_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
qed_ll2_rxq_flush(p_hwfn, connection_handle);
}
- if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO)
+ if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO)
qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
- if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_FCOE) {
+ if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_FCOE) {
qed_llh_remove_protocol_filter(p_hwfn, p_ptt,
0x8906, 0,
QED_LLH_FILTER_ETHERTYPE);
@@ -1887,8 +1819,28 @@ out:
return rc;
}
-void qed_ll2_release_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
+static void qed_ll2_release_connection_ooo(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_ll2_conn)
{
+ struct qed_ooo_buffer *p_buffer;
+
+ if (p_ll2_conn->input.conn_type != QED_LL2_TYPE_OOO)
+ return;
+
+ qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
+ while ((p_buffer = qed_ooo_get_free_buffer(p_hwfn,
+ p_hwfn->p_ooo_info))) {
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ p_buffer->rx_buffer_size,
+ p_buffer->rx_buffer_virt_addr,
+ p_buffer->rx_buffer_phys_addr);
+ kfree(p_buffer);
+ }
+}
+
+void qed_ll2_release_connection(void *cxt, u8 connection_handle)
+{
+ struct qed_hwfn *p_hwfn = cxt;
struct qed_ll2_info *p_ll2_conn = NULL;
p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
@@ -1921,7 +1873,7 @@ void qed_ll2_release_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
mutex_unlock(&p_ll2_conn->mutex);
}
-struct qed_ll2_info *qed_ll2_alloc(struct qed_hwfn *p_hwfn)
+int qed_ll2_alloc(struct qed_hwfn *p_hwfn)
{
struct qed_ll2_info *p_ll2_connections;
u8 i;
@@ -1931,28 +1883,52 @@ struct qed_ll2_info *qed_ll2_alloc(struct qed_hwfn *p_hwfn)
sizeof(struct qed_ll2_info), GFP_KERNEL);
if (!p_ll2_connections) {
DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_ll2'\n");
- return NULL;
+ return -ENOMEM;
}
for (i = 0; i < QED_MAX_NUM_OF_LL2_CONNECTIONS; i++)
p_ll2_connections[i].my_id = i;
- return p_ll2_connections;
+ p_hwfn->p_ll2_info = p_ll2_connections;
+ return 0;
}
-void qed_ll2_setup(struct qed_hwfn *p_hwfn,
- struct qed_ll2_info *p_ll2_connections)
+void qed_ll2_setup(struct qed_hwfn *p_hwfn)
{
int i;
for (i = 0; i < QED_MAX_NUM_OF_LL2_CONNECTIONS; i++)
- mutex_init(&p_ll2_connections[i].mutex);
+ mutex_init(&p_hwfn->p_ll2_info[i].mutex);
}
-void qed_ll2_free(struct qed_hwfn *p_hwfn,
- struct qed_ll2_info *p_ll2_connections)
+void qed_ll2_free(struct qed_hwfn *p_hwfn)
{
- kfree(p_ll2_connections);
+ if (!p_hwfn->p_ll2_info)
+ return;
+
+ kfree(p_hwfn->p_ll2_info);
+ p_hwfn->p_ll2_info = NULL;
+}
+
+static void _qed_ll2_get_port_stats(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_ll2_stats *p_stats)
+{
+ struct core_ll2_port_stats port_stats;
+
+ memset(&port_stats, 0, sizeof(port_stats));
+ qed_memcpy_from(p_hwfn, p_ptt, &port_stats,
+ BAR0_MAP_REG_TSDM_RAM +
+ TSTORM_LL2_PORT_STAT_OFFSET(MFW_PORT(p_hwfn)),
+ sizeof(port_stats));
+
+ p_stats->gsi_invalid_hdr = HILO_64_REGPAIR(port_stats.gsi_invalid_hdr);
+ p_stats->gsi_invalid_pkt_length =
+ HILO_64_REGPAIR(port_stats.gsi_invalid_pkt_length);
+ p_stats->gsi_unsupported_pkt_typ =
+ HILO_64_REGPAIR(port_stats.gsi_unsupported_pkt_typ);
+ p_stats->gsi_crcchksm_error =
+ HILO_64_REGPAIR(port_stats.gsi_crcchksm_error);
}
static void _qed_ll2_get_tstats(struct qed_hwfn *p_hwfn,
@@ -2018,9 +1994,10 @@ static void _qed_ll2_get_pstats(struct qed_hwfn *p_hwfn,
p_stats->sent_bcast_pkts = HILO_64_REGPAIR(pstats.sent_bcast_pkts);
}
-int qed_ll2_get_stats(struct qed_hwfn *p_hwfn,
+int qed_ll2_get_stats(void *cxt,
u8 connection_handle, struct qed_ll2_stats *p_stats)
{
+ struct qed_hwfn *p_hwfn = cxt;
struct qed_ll2_info *p_ll2_conn = NULL;
struct qed_ptt *p_ptt;
@@ -2038,6 +2015,8 @@ int qed_ll2_get_stats(struct qed_hwfn *p_hwfn,
return -EINVAL;
}
+ if (p_ll2_conn->input.gsi_enable)
+ _qed_ll2_get_port_stats(p_hwfn, p_ptt, p_stats);
_qed_ll2_get_tstats(p_hwfn, p_ptt, p_ll2_conn, p_stats);
_qed_ll2_get_ustats(p_hwfn, p_ptt, p_ll2_conn, p_stats);
if (p_ll2_conn->tx_stats_en)
@@ -2047,6 +2026,17 @@ int qed_ll2_get_stats(struct qed_hwfn *p_hwfn,
return 0;
}
+static void qed_ll2b_release_rx_packet(void *cxt,
+ u8 connection_handle,
+ void *cookie,
+ dma_addr_t rx_buf_addr,
+ bool b_last_packet)
+{
+ struct qed_hwfn *p_hwfn = cxt;
+
+ qed_ll2_dealloc_buffer(p_hwfn->cdev, cookie);
+}
+
static void qed_ll2_register_cb_ops(struct qed_dev *cdev,
const struct qed_ll2_cb_ops *ops,
void *cookie)
@@ -2055,21 +2045,86 @@ static void qed_ll2_register_cb_ops(struct qed_dev *cdev,
cdev->ll2->cb_cookie = cookie;
}
+static struct qed_ll2_cbs ll2_cbs = {
+ .rx_comp_cb = &qed_ll2b_complete_rx_packet,
+ .rx_release_cb = &qed_ll2b_release_rx_packet,
+ .tx_comp_cb = &qed_ll2b_complete_tx_packet,
+ .tx_release_cb = &qed_ll2b_complete_tx_packet,
+};
+
+static void qed_ll2_set_conn_data(struct qed_dev *cdev,
+ struct qed_ll2_acquire_data *data,
+ struct qed_ll2_params *params,
+ enum qed_ll2_conn_type conn_type,
+ u8 *handle, bool lb)
+{
+ memset(data, 0, sizeof(*data));
+
+ data->input.conn_type = conn_type;
+ data->input.mtu = params->mtu;
+ data->input.rx_num_desc = QED_LL2_RX_SIZE;
+ data->input.rx_drop_ttl0_flg = params->drop_ttl0_packets;
+ data->input.rx_vlan_removal_en = params->rx_vlan_stripping;
+ data->input.tx_num_desc = QED_LL2_TX_SIZE;
+ data->p_connection_handle = handle;
+ data->cbs = &ll2_cbs;
+ ll2_cbs.cookie = QED_LEADING_HWFN(cdev);
+
+ if (lb) {
+ data->input.tx_tc = PKT_LB_TC;
+ data->input.tx_dest = QED_LL2_TX_DEST_LB;
+ } else {
+ data->input.tx_tc = 0;
+ data->input.tx_dest = QED_LL2_TX_DEST_NW;
+ }
+}
+
+static int qed_ll2_start_ooo(struct qed_dev *cdev,
+ struct qed_ll2_params *params)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ u8 *handle = &hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id;
+ struct qed_ll2_acquire_data data;
+ int rc;
+
+ qed_ll2_set_conn_data(cdev, &data, params,
+ QED_LL2_TYPE_OOO, handle, true);
+
+ rc = qed_ll2_acquire_connection(hwfn, &data);
+ if (rc) {
+ DP_INFO(cdev, "Failed to acquire LL2 OOO connection\n");
+ goto out;
+ }
+
+ rc = qed_ll2_establish_connection(hwfn, *handle);
+ if (rc) {
+ DP_INFO(cdev, "Failed to establish LL2 OOO connection\n");
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ qed_ll2_release_connection(hwfn, *handle);
+out:
+ *handle = QED_LL2_UNUSED_HANDLE;
+ return rc;
+}
+
static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
{
- struct qed_ll2_conn ll2_info;
struct qed_ll2_buffer *buffer, *tmp_buffer;
enum qed_ll2_conn_type conn_type;
+ struct qed_ll2_acquire_data data;
struct qed_ptt *p_ptt;
int rc, i;
- u8 gsi_enable = 1;
+
/* Initialize LL2 locks & lists */
INIT_LIST_HEAD(&cdev->ll2->list);
spin_lock_init(&cdev->ll2->lock);
cdev->ll2->rx_size = NET_SKB_PAD + ETH_HLEN +
L1_CACHE_BYTES + params->mtu;
- cdev->ll2->frags_mapped = params->frags_mapped;
/*Allocate memory for LL2 */
DP_INFO(cdev, "Allocating LL2 buffers of size %08x bytes\n",
@@ -2094,11 +2149,9 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
switch (QED_LEADING_HWFN(cdev)->hw_info.personality) {
case QED_PCI_FCOE:
conn_type = QED_LL2_TYPE_FCOE;
- gsi_enable = 0;
break;
case QED_PCI_ISCSI:
conn_type = QED_LL2_TYPE_ISCSI;
- gsi_enable = 0;
break;
case QED_PCI_ETH_ROCE:
conn_type = QED_LL2_TYPE_ROCE;
@@ -2107,20 +2160,10 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
conn_type = QED_LL2_TYPE_TEST;
}
- /* Prepare the temporary ll2 information */
- memset(&ll2_info, 0, sizeof(ll2_info));
+ qed_ll2_set_conn_data(cdev, &data, params, conn_type,
+ &cdev->ll2->handle, false);
- ll2_info.conn_type = conn_type;
- ll2_info.mtu = params->mtu;
- ll2_info.rx_drop_ttl0_flg = params->drop_ttl0_packets;
- ll2_info.rx_vlan_removal_en = params->rx_vlan_stripping;
- ll2_info.tx_tc = 0;
- ll2_info.tx_dest = CORE_TX_DEST_NW;
- ll2_info.gsi_enable = gsi_enable;
-
- rc = qed_ll2_acquire_connection(QED_LEADING_HWFN(cdev), &ll2_info,
- QED_LL2_RX_SIZE, QED_LL2_TX_SIZE,
- &cdev->ll2->handle);
+ rc = qed_ll2_acquire_connection(QED_LEADING_HWFN(cdev), &data);
if (rc) {
DP_INFO(cdev, "Failed to acquire LL2 connection\n");
goto fail;
@@ -2243,6 +2286,7 @@ fail:
static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb)
{
+ struct qed_ll2_tx_pkt_info pkt;
const skb_frag_t *frag;
int rc = -EINVAL, i;
dma_addr_t mapping;
@@ -2277,32 +2321,30 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb)
flags |= BIT(CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT);
}
- rc = qed_ll2_prepare_tx_packet(QED_LEADING_HWFN(cdev),
- cdev->ll2->handle,
- 1 + skb_shinfo(skb)->nr_frags,
- vlan, flags, 0, QED_LL2_TX_DEST_NW,
- 0 /* RoCE FLAVOR */,
- mapping, skb->len, skb, 1);
+ memset(&pkt, 0, sizeof(pkt));
+ pkt.num_of_bds = 1 + skb_shinfo(skb)->nr_frags;
+ pkt.vlan = vlan;
+ pkt.bd_flags = flags;
+ pkt.tx_dest = QED_LL2_TX_DEST_NW;
+ pkt.first_frag = mapping;
+ pkt.first_frag_len = skb->len;
+ pkt.cookie = skb;
+
+ rc = qed_ll2_prepare_tx_packet(QED_LEADING_HWFN(cdev), cdev->ll2->handle,
+ &pkt, true);
if (rc)
goto err;
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
frag = &skb_shinfo(skb)->frags[i];
- if (!cdev->ll2->frags_mapped) {
- mapping = skb_frag_dma_map(&cdev->pdev->dev, frag, 0,
- skb_frag_size(frag),
- DMA_TO_DEVICE);
-
- if (unlikely(dma_mapping_error(&cdev->pdev->dev,
- mapping))) {
- DP_NOTICE(cdev,
- "Unable to map frag - dropping packet\n");
- rc = -ENOMEM;
- goto err;
- }
- } else {
- mapping = page_to_phys(skb_frag_page(frag)) |
- frag->page_offset;
+
+ mapping = skb_frag_dma_map(&cdev->pdev->dev, frag, 0,
+ skb_frag_size(frag), DMA_TO_DEVICE);
+
+ if (unlikely(dma_mapping_error(&cdev->pdev->dev, mapping))) {
+ DP_NOTICE(cdev,
+ "Unable to map frag - dropping packet\n");
+ rc = -ENOMEM;
+ goto err;
}
rc = qed_ll2_set_fragment_of_tx_packet(QED_LEADING_HWFN(cdev),
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.h b/drivers/net/ethernet/qlogic/qed/qed_ll2.h
index 31a409033c41..a822528e9c63 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.h
@@ -47,29 +47,6 @@
#define QED_MAX_NUM_OF_LL2_CONNECTIONS (4)
-enum qed_ll2_roce_flavor_type {
- QED_LL2_ROCE,
- QED_LL2_RROCE,
- MAX_QED_LL2_ROCE_FLAVOR_TYPE
-};
-
-enum qed_ll2_conn_type {
- QED_LL2_TYPE_FCOE,
- QED_LL2_TYPE_ISCSI,
- QED_LL2_TYPE_TEST,
- QED_LL2_TYPE_ISCSI_OOO,
- QED_LL2_TYPE_RESERVED2,
- QED_LL2_TYPE_ROCE,
- QED_LL2_TYPE_RESERVED3,
- MAX_QED_LL2_RX_CONN_TYPE
-};
-
-enum qed_ll2_tx_dest {
- QED_LL2_TX_DEST_NW, /* Light L2 TX Destination to the Network */
- QED_LL2_TX_DEST_LB, /* Light L2 TX Destination to the Loopback */
- QED_LL2_TX_DEST_MAX
-};
-
struct qed_ll2_rx_packet {
struct list_head list_entry;
struct core_rx_bd_with_buff_len *rxq_bd;
@@ -135,30 +112,21 @@ struct qed_ll2_tx_queue {
bool b_completing_packet;
};
-struct qed_ll2_conn {
- enum qed_ll2_conn_type conn_type;
- u16 mtu;
- u8 rx_drop_ttl0_flg;
- u8 rx_vlan_removal_en;
- u8 tx_tc;
- enum core_tx_dest tx_dest;
- enum core_error_handle ai_err_packet_too_big;
- enum core_error_handle ai_err_no_buf;
- u8 gsi_enable;
-};
-
struct qed_ll2_info {
/* Lock protecting the state of LL2 */
struct mutex mutex;
- struct qed_ll2_conn conn;
+
+ struct qed_ll2_acquire_data_inputs input;
u32 cid;
u8 my_id;
u8 queue_id;
u8 tx_stats_id;
bool b_active;
+ enum core_tx_dest tx_dest;
u8 tx_stats_en;
struct qed_ll2_rx_queue rx_queue;
struct qed_ll2_tx_queue tx_queue;
+ struct qed_ll2_cbs cbs;
};
/**
@@ -166,38 +134,30 @@ struct qed_ll2_info {
* starts rx & tx (if relevant) queues pair. Provides
 * connection handle as output parameter.
*
- * @param p_hwfn
- * @param p_params Contain various configuration properties
- * @param rx_num_desc
- * @param tx_num_desc
- *
- * @param p_connection_handle Output container for LL2 connection's handle
*
- * @return 0 on success, failure otherwise
+ * @param cxt - pointer to the hw-function [opaque to some]
+ * @param data - describes connection parameters
+ * @return 0 on success, failure otherwise
*/
-int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn,
- struct qed_ll2_conn *p_params,
- u16 rx_num_desc,
- u16 tx_num_desc,
- u8 *p_connection_handle);
+int qed_ll2_acquire_connection(void *cxt, struct qed_ll2_acquire_data *data);
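
A hedged caller-side sketch of the aggregated acquire API; the field choices mirror qed_ll2_set_conn_data() in qed_ll2.c, while the values and the my_* names are illustrative only:

static int my_ll2_acquire(struct qed_hwfn *p_hwfn,
			  const struct qed_ll2_cbs *my_cbs, u8 *my_handle)
{
	struct qed_ll2_acquire_data data;

	memset(&data, 0, sizeof(data));
	data.input.conn_type = QED_LL2_TYPE_TEST;
	data.input.mtu = 1500;
	data.input.rx_num_desc = 32;
	data.input.tx_num_desc = 32;
	data.input.tx_tc = 0;
	data.input.tx_dest = QED_LL2_TX_DEST_NW;
	/* tx_max_bds_per_packet == 0 gets corrected to the HW maximum */
	data.p_connection_handle = my_handle;
	data.cbs = my_cbs;	/* all four callbacks plus cookie must be set */

	return qed_ll2_acquire_connection(p_hwfn, &data);
}
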
/**
* @brief qed_ll2_establish_connection - start previously
* allocated LL2 queues pair
*
- * @param p_hwfn
+ * @param cxt - pointer to the hw-function [opaque to some]
- * @param p_ptt
* @param connection_handle LL2 connection's handle obtained from
 * qed_ll2_acquire_connection
*
* @return 0 on success, failure otherwise
*/
-int qed_ll2_establish_connection(struct qed_hwfn *p_hwfn, u8 connection_handle);
+int qed_ll2_establish_connection(void *cxt, u8 connection_handle);
/**
* @brief qed_ll2_post_rx_buffers - submit buffers to LL2 Rx queue.
*
- * @param p_hwfn
+ * @param cxt - pointer to the hw-function [opaque to some]
* @param connection_handle LL2 connection's handle obtained from
 * qed_ll2_acquire_connection
* @param addr rx (physical address) buffers to submit
@@ -206,7 +166,7 @@ int qed_ll2_establish_connection(struct qed_hwfn *p_hwfn, u8 connection_handle);
*
* @return 0 on success, failure otherwise
*/
-int qed_ll2_post_rx_buffer(struct qed_hwfn *p_hwfn,
+int qed_ll2_post_rx_buffer(void *cxt,
u8 connection_handle,
dma_addr_t addr,
u16 buf_len, void *cookie, u8 notify_fw);
@@ -215,53 +175,34 @@ int qed_ll2_post_rx_buffer(struct qed_hwfn *p_hwfn,
* @brief qed_ll2_prepare_tx_packet - request for start Tx BD
* to prepare Tx packet submission to FW.
*
- * @param p_hwfn
- * @param connection_handle LL2 connection's handle obtained from
- * qed_ll2_require_connection
- * @param num_of_bds a number of requested BD equals a number of
- * fragments in Tx packet
- * @param vlan VLAN to insert to packet (if insertion set)
- * @param bd_flags
- * @param l4_hdr_offset_w L4 Header Offset from start of packet
- * (in words). This is needed if both l4_csum
- * and ipv6_ext are set
- * @param e_tx_dest indicates if the packet is to be transmitted via
- * loopback or to the network
- * @param first_frag
- * @param first_frag_len
- * @param cookie
- *
- * @param notify_fw
+ * @param cxt - pointer to the hw-function [opaque to some]
+ * @param connection_handle
+ * @param pkt - info regarding the tx packet
+ * @param notify_fw - issue doorbell to fw for this packet
*
* @return 0 on success, failure otherwise
*/
-int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn,
+int qed_ll2_prepare_tx_packet(void *cxt,
u8 connection_handle,
- u8 num_of_bds,
- u16 vlan,
- u8 bd_flags,
- u16 l4_hdr_offset_w,
- enum qed_ll2_tx_dest e_tx_dest,
- enum qed_ll2_roce_flavor_type qed_roce_flavor,
- dma_addr_t first_frag,
- u16 first_frag_len, void *cookie, u8 notify_fw);
+ struct qed_ll2_tx_pkt_info *pkt,
+ bool notify_fw);
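
A sketch of the resulting two-step Tx flow (first fragment in the prepare call, each further BD via set_fragment), mirroring qed_ll2_start_xmit() in qed_ll2.c; frag0_*/frag1_*/my_cookie are placeholder values:

	struct qed_ll2_tx_pkt_info pkt;
	int rc;

	memset(&pkt, 0, sizeof(pkt));
	pkt.num_of_bds = 2;		/* first frag plus one extra BD */
	pkt.tx_dest = QED_LL2_TX_DEST_NW;
	pkt.first_frag = frag0_dma;	/* placeholder DMA address */
	pkt.first_frag_len = frag0_len;
	pkt.cookie = my_cookie;		/* echoed back in tx_comp_cb */

	rc = qed_ll2_prepare_tx_packet(p_hwfn, handle, &pkt, true);
	if (!rc)
		rc = qed_ll2_set_fragment_of_tx_packet(p_hwfn, handle,
						       frag1_dma, frag1_len);
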
/**
* @brief qed_ll2_release_connection - releases resources
* allocated for LL2 connection
*
- * @param p_hwfn
+ * @param cxt - pointer to the hw-function [opaque to some]
* @param connection_handle LL2 connection's handle obtained from
 * qed_ll2_acquire_connection
*/
-void qed_ll2_release_connection(struct qed_hwfn *p_hwfn, u8 connection_handle);
+void qed_ll2_release_connection(void *cxt, u8 connection_handle);
/**
* @brief qed_ll2_set_fragment_of_tx_packet - provides fragments to fill
* Tx BD of BDs requested by
* qed_ll2_prepare_tx_packet
*
- * @param p_hwfn
+ * @param cxt - pointer to the hw-function [opaque to some]
* @param connection_handle LL2 connection's handle
* obtained from
 * qed_ll2_acquire_connection
@@ -270,7 +211,7 @@ void qed_ll2_release_connection(struct qed_hwfn *p_hwfn, u8 connection_handle);
*
* @return 0 on success, failure otherwise
*/
-int qed_ll2_set_fragment_of_tx_packet(struct qed_hwfn *p_hwfn,
+int qed_ll2_set_fragment_of_tx_packet(void *cxt,
u8 connection_handle,
dma_addr_t addr, u16 nbytes);
@@ -278,27 +219,27 @@ int qed_ll2_set_fragment_of_tx_packet(struct qed_hwfn *p_hwfn,
* @brief qed_ll2_terminate_connection - stops Tx/Rx queues
*
*
- * @param p_hwfn
+ * @param cxt - pointer to the hw-function [opaque to some]
* @param connection_handle LL2 connection's handle
* obtained from
 * qed_ll2_acquire_connection
*
* @return 0 on success, failure otherwise
*/
-int qed_ll2_terminate_connection(struct qed_hwfn *p_hwfn, u8 connection_handle);
+int qed_ll2_terminate_connection(void *cxt, u8 connection_handle);
/**
* @brief qed_ll2_get_stats - get LL2 queue's statistics
*
*
- * @param p_hwfn
+ * @param cxt - pointer to the hw-function [opaque to some]
* @param connection_handle LL2 connection's handle obtained from
 * qed_ll2_acquire_connection
* @param p_stats
*
* @return 0 on success, failure otherwise
*/
-int qed_ll2_get_stats(struct qed_hwfn *p_hwfn,
+int qed_ll2_get_stats(void *cxt,
u8 connection_handle, struct qed_ll2_stats *p_stats);
/**
@@ -306,27 +247,24 @@ int qed_ll2_get_stats(struct qed_hwfn *p_hwfn,
*
* @param p_hwfn
*
- * @return pointer to allocated qed_ll2_info or NULL
+ * @return 0 on success, -ENOMEM on allocation failure
*/
-struct qed_ll2_info *qed_ll2_alloc(struct qed_hwfn *p_hwfn);
+int qed_ll2_alloc(struct qed_hwfn *p_hwfn);
/**
* @brief qed_ll2_setup - Inits LL2 connections set
*
* @param p_hwfn
- * @param p_ll2_connections
*
*/
-void qed_ll2_setup(struct qed_hwfn *p_hwfn,
- struct qed_ll2_info *p_ll2_connections);
+void qed_ll2_setup(struct qed_hwfn *p_hwfn);
/**
* @brief qed_ll2_free - Releases LL2 connections set
*
* @param p_hwfn
- * @param p_ll2_connections
*
*/
-void qed_ll2_free(struct qed_hwfn *p_hwfn,
- struct qed_ll2_info *p_ll2_connections);
+void qed_ll2_free(struct qed_hwfn *p_hwfn);
+
#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 715b3aaf83ac..b11399606990 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -34,7 +34,6 @@
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/slab.h>
-#include <linux/version.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/dma-mapping.h>
@@ -123,7 +122,7 @@ static void qed_free_pci(struct qed_dev *cdev)
{
struct pci_dev *pdev = cdev->pdev;
- if (cdev->doorbells)
+ if (cdev->doorbells && cdev->db_size)
iounmap(cdev->doorbells);
if (cdev->regview)
iounmap(cdev->regview);
@@ -207,16 +206,24 @@ static int qed_init_pci(struct qed_dev *cdev, struct pci_dev *pdev)
goto err2;
}
- if (IS_PF(cdev)) {
- cdev->db_phys_addr = pci_resource_start(cdev->pdev, 2);
- cdev->db_size = pci_resource_len(cdev->pdev, 2);
- cdev->doorbells = ioremap_wc(cdev->db_phys_addr, cdev->db_size);
- if (!cdev->doorbells) {
- DP_NOTICE(cdev, "Cannot map doorbell space\n");
- return -ENOMEM;
+ cdev->db_phys_addr = pci_resource_start(cdev->pdev, 2);
+ cdev->db_size = pci_resource_len(cdev->pdev, 2);
+ if (!cdev->db_size) {
+ if (IS_PF(cdev)) {
+ DP_NOTICE(cdev, "No Doorbell bar available\n");
+ return -EINVAL;
+ } else {
+ return 0;
}
}
+ cdev->doorbells = ioremap_wc(cdev->db_phys_addr, cdev->db_size);
+
+ if (!cdev->doorbells) {
+ DP_NOTICE(cdev, "Cannot map doorbell space\n");
+ return -ENOMEM;
+ }
+
return 0;
err2:
@@ -230,6 +237,8 @@ err0:
int qed_fill_dev_info(struct qed_dev *cdev,
struct qed_dev_info *dev_info)
{
+ struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_hw_info *hw_info = &p_hwfn->hw_info;
struct qed_tunnel_info *tun = &cdev->tunnel;
struct qed_ptt *ptt;
@@ -253,11 +262,10 @@ int qed_fill_dev_info(struct qed_dev *cdev,
dev_info->pci_mem_start = cdev->pci_params.mem_start;
dev_info->pci_mem_end = cdev->pci_params.mem_end;
dev_info->pci_irq = cdev->pci_params.irq;
- dev_info->rdma_supported = (cdev->hwfns[0].hw_info.personality ==
- QED_PCI_ETH_ROCE);
+ dev_info->rdma_supported = QED_IS_RDMA_PERSONALITY(p_hwfn);
dev_info->is_mf_default = IS_MF_DEFAULT(&cdev->hwfns[0]);
dev_info->dev_type = cdev->type;
- ether_addr_copy(dev_info->hw_mac, cdev->hwfns[0].hw_info.hw_mac_addr);
+ ether_addr_copy(dev_info->hw_mac, hw_info->hw_mac_addr);
if (IS_PF(cdev)) {
dev_info->fw_major = FW_MAJOR_VERSION;
@@ -267,9 +275,10 @@ int qed_fill_dev_info(struct qed_dev *cdev,
dev_info->mf_mode = cdev->mf_mode;
dev_info->tx_switching = true;
- if (QED_LEADING_HWFN(cdev)->hw_info.b_wol_support ==
- QED_WOL_SUPPORT_PME)
+ if (hw_info->b_wol_support == QED_WOL_SUPPORT_PME)
dev_info->wol_support = true;
+
+ dev_info->abs_pf_id = QED_LEADING_HWFN(cdev)->abs_pf_id;
} else {
qed_vf_get_fw_version(&cdev->hwfns[0], &dev_info->fw_major,
&dev_info->fw_minor, &dev_info->fw_rev,
@@ -282,6 +291,9 @@ int qed_fill_dev_info(struct qed_dev *cdev,
qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), ptt,
&dev_info->mfw_rev, NULL);
+ qed_mcp_get_mbi_ver(QED_LEADING_HWFN(cdev), ptt,
+ &dev_info->mbi_version);
+
qed_mcp_get_flash_size(QED_LEADING_HWFN(cdev), ptt,
&dev_info->flash_size);
@@ -292,7 +304,7 @@ int qed_fill_dev_info(struct qed_dev *cdev,
&dev_info->mfw_rev, NULL);
}
- dev_info->mtu = QED_LEADING_HWFN(cdev)->hw_info.mtu;
+ dev_info->mtu = hw_info->mtu;
return 0;
}
@@ -336,6 +348,7 @@ static struct qed_dev *qed_probe(struct pci_dev *pdev,
if (!cdev)
goto err0;
+ cdev->drv_type = DRV_ID_DRV_TYPE_LINUX;
cdev->protocol = params->protocol;
if (params->is_vf)
@@ -607,6 +620,18 @@ int qed_slowpath_irq_req(struct qed_hwfn *hwfn)
return rc;
}
+static void qed_slowpath_tasklet_flush(struct qed_hwfn *p_hwfn)
+{
+ /* Calling the disable function will make sure that any
+ * currently-running function is completed. The following call to the
+ * enable function makes this sequence a flush-like operation.
+ */
+ if (p_hwfn->b_sp_dpc_enabled) {
+ tasklet_disable(p_hwfn->sp_dpc);
+ tasklet_enable(p_hwfn->sp_dpc);
+ }
+}
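
tasklet_disable() does not return while the tasklet is mid-run, which is what turns the disable/enable pair above into a flush; the same pattern in isolation:

static void flush_tasklet_once(struct tasklet_struct *t)
{
	tasklet_disable(t);	/* waits out any in-flight handler */
	tasklet_enable(t);	/* re-arm; future schedules run normally */
}
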
+
void qed_slowpath_irq_sync(struct qed_hwfn *p_hwfn)
{
struct qed_dev *cdev = p_hwfn->cdev;
@@ -618,6 +643,8 @@ void qed_slowpath_irq_sync(struct qed_hwfn *p_hwfn)
synchronize_irq(cdev->int_params.msix_table[id].vector);
else
synchronize_irq(cdev->pdev->irq);
+
+ qed_slowpath_tasklet_flush(p_hwfn);
}
static void qed_slowpath_irq_free(struct qed_dev *cdev)
@@ -745,7 +772,7 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev,
for_each_hwfn(cdev, i) {
memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
qed_int_get_num_sbs(&cdev->hwfns[i], &sb_cnt_info);
- cdev->int_params.in.num_vectors += sb_cnt_info.sb_cnt;
+ cdev->int_params.in.num_vectors += sb_cnt_info.cnt;
cdev->int_params.in.num_vectors++; /* slowpath */
}
@@ -763,7 +790,7 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev,
cdev->num_hwfns;
if (!IS_ENABLED(CONFIG_QED_RDMA) ||
- QED_LEADING_HWFN(cdev)->hw_info.personality != QED_PCI_ETH_ROCE)
+ !QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev)))
return 0;
for_each_hwfn(cdev, i)
@@ -904,8 +931,7 @@ static void qed_update_pf_params(struct qed_dev *cdev,
/* In case we might support RDMA, don't allow qede to be greedy
* with the L2 contexts. Allow for 64 queues [rx, tx, xdp] per hwfn.
*/
- if (QED_LEADING_HWFN(cdev)->hw_info.personality ==
- QED_PCI_ETH_ROCE) {
+ if (QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev))) {
u16 *num_cons;
num_cons = &params->eth_pf_params.num_cons;
@@ -1112,17 +1138,13 @@ static int qed_slowpath_stop(struct qed_dev *cdev)
return 0;
}
-static void qed_set_id(struct qed_dev *cdev, char name[NAME_SIZE],
- char ver_str[VER_SIZE])
+static void qed_set_name(struct qed_dev *cdev, char name[NAME_SIZE])
{
int i;
memcpy(cdev->name, name, NAME_SIZE);
for_each_hwfn(cdev, i)
snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i);
-
- memcpy(cdev->ver_str, ver_str, VER_SIZE);
- cdev->drv_type = DRV_ID_DRV_TYPE_LINUX;
}
static u32 qed_sb_init(struct qed_dev *cdev,
@@ -1520,6 +1542,21 @@ static int qed_drain(struct qed_dev *cdev)
return 0;
}
+static int qed_nvm_get_image(struct qed_dev *cdev, enum qed_nvm_images type,
+ u8 *buf, u16 len)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
+ int rc;
+
+ if (!ptt)
+ return -EAGAIN;
+
+ rc = qed_mcp_get_nvm_image(hwfn, ptt, type, buf, len);
+ qed_ptt_release(hwfn, ptt);
+ return rc;
+}
+
static void qed_get_coalesce(struct qed_dev *cdev, u16 *rx_coal, u16 *tx_coal)
{
*rx_coal = cdev->rx_coalesce_usecs;
@@ -1676,7 +1713,7 @@ const struct qed_common_ops qed_common_ops_pass = {
.probe = &qed_probe,
.remove = &qed_remove,
.set_power_state = &qed_set_power_state,
- .set_id = &qed_set_id,
+ .set_name = &qed_set_name,
.update_pf_params = &qed_update_pf_params,
.slowpath_start = &qed_slowpath_start,
.slowpath_stop = &qed_slowpath_stop,
@@ -1697,6 +1734,7 @@ const struct qed_common_ops qed_common_ops_pass = {
.dbg_all_data_size = &qed_dbg_all_data_size,
.chain_alloc = &qed_chain_alloc,
.chain_free = &qed_chain_free,
+ .nvm_get_image = &qed_nvm_get_image,
.get_coalesce = &qed_get_coalesce,
.set_coalesce = &qed_set_coalesce,
.set_led = &qed_set_led,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index 7266b36a2655..9da91045d167 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -177,6 +177,7 @@ int qed_mcp_free(struct qed_hwfn *p_hwfn)
}
kfree(p_hwfn->mcp_info);
+ p_hwfn->mcp_info = NULL;
return 0;
}
@@ -1397,6 +1398,28 @@ static void qed_mcp_update_bw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
&param);
}
+static void qed_mcp_update_stag(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ struct public_func shmem_info;
+ u32 resp = 0, param = 0;
+
+ qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
+
+ p_hwfn->mcp_info->func_info.ovlan = (u16)shmem_info.ovlan_stag &
+ FUNC_MF_CFG_OV_STAG_MASK;
+ p_hwfn->hw_info.ovlan = p_hwfn->mcp_info->func_info.ovlan;
+ if ((p_hwfn->hw_info.hw_mode & BIT(MODE_MF_SD)) &&
+ (p_hwfn->hw_info.ovlan != QED_MCP_VLAN_UNSET)) {
+ qed_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_TAG_VALUE, p_hwfn->hw_info.ovlan);
+ qed_sp_pf_update_stag(p_hwfn);
+ }
+
+ /* Acknowledge the MFW */
+ qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_S_TAG_UPDATE_ACK, 0,
+ &resp, &param);
+}
+
int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt)
{
@@ -1452,6 +1475,9 @@ int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
case MFW_DRV_MSG_BW_UPDATE:
qed_mcp_update_bw(p_hwfn, p_ptt);
break;
+ case MFW_DRV_MSG_S_TAG_UPDATE:
+ qed_mcp_update_stag(p_hwfn, p_ptt);
+ break;
default:
DP_INFO(p_hwfn, "Unimplemented MFW message %d\n", i);
rc = -EINVAL;
@@ -1522,6 +1549,36 @@ int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn,
return 0;
}
+int qed_mcp_get_mbi_ver(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *p_mbi_ver)
+{
+ u32 nvm_cfg_addr, nvm_cfg1_offset, mbi_ver_addr;
+
+ if (IS_VF(p_hwfn->cdev))
+ return -EINVAL;
+
+ /* Read the address of the nvm_cfg */
+ nvm_cfg_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);
+ if (!nvm_cfg_addr) {
+ DP_NOTICE(p_hwfn, "Shared memory not initialized\n");
+ return -EINVAL;
+ }
+
+ /* Read the offset of nvm_cfg1 */
+ nvm_cfg1_offset = qed_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);
+
+ mbi_ver_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
+ offsetof(struct nvm_cfg1, glob) +
+ offsetof(struct nvm_cfg1_glob, mbi_version);
+ *p_mbi_ver = qed_rd(p_hwfn, p_ptt,
+ mbi_ver_addr) &
+ (NVM_CFG1_GLOB_MBI_VERSION_0_MASK |
+ NVM_CFG1_GLOB_MBI_VERSION_1_MASK |
+ NVM_CFG1_GLOB_MBI_VERSION_2_MASK);
+
+ return 0;
+}
+
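qed_mcp_get_mbi_ver() returns the three MBI version fields still packed into one u32. Assuming the conventional layout in which VERSION_0/1/2 occupy consecutive bytes starting at bit 0 (an assumption; the authoritative offsets are the NVM_CFG1_GLOB_MBI_VERSION_*_OFFSET definitions in nvm_cfg.h), a caller could render it as:

	u32 mbi_ver;

	if (!qed_mcp_get_mbi_ver(p_hwfn, p_ptt, &mbi_ver))
		DP_INFO(p_hwfn, "MBI %d.%d.%d\n",
			(mbi_ver >> 16) & 0xff,	/* VERSION_2, assumed byte 2 */
			(mbi_ver >> 8) & 0xff,	/* VERSION_1, assumed byte 1 */
			mbi_ver & 0xff);	/* VERSION_0, assumed byte 0 */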
int qed_mcp_get_media_type(struct qed_dev *cdev, u32 *p_media_type)
{
struct qed_hwfn *p_hwfn = &cdev->hwfns[0];
@@ -1679,10 +1736,10 @@ int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn,
DP_NOTICE(p_hwfn, "MAC is 0 in shmem\n");
}
- info->wwn_port = (u64)shmem_info.fcoe_wwn_port_name_upper |
- (((u64)shmem_info.fcoe_wwn_port_name_lower) << 32);
- info->wwn_node = (u64)shmem_info.fcoe_wwn_node_name_upper |
- (((u64)shmem_info.fcoe_wwn_node_name_lower) << 32);
+ info->wwn_port = (u64)shmem_info.fcoe_wwn_port_name_lower |
+ (((u64)shmem_info.fcoe_wwn_port_name_upper) << 32);
+ info->wwn_node = (u64)shmem_info.fcoe_wwn_node_name_lower |
+ (((u64)shmem_info.fcoe_wwn_node_name_upper) << 32);
info->ovlan = (u16)(shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK);
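The WWN fix above swaps which 32-bit shmem word lands in the high half of the 64-bit name: the *_upper word belongs in bits 63:32 and the *_lower word in bits 31:0. A worked example with illustrative values:

	u32 upper = 0x20000025, lower = 0xb500a0c4;
	u64 wwn = (u64)lower | ((u64)upper << 32);	/* 0x20000025b500a0c4 */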
@@ -1770,8 +1827,9 @@ int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn,
return 0;
}
-int qed_mcp_config_vf_msix(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt, u8 vf_id, u8 num)
+static int
+qed_mcp_config_vf_msix_bb(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u8 vf_id, u8 num)
{
u32 resp = 0, param = 0, rc_param = 0;
int rc;
@@ -1801,6 +1859,36 @@ int qed_mcp_config_vf_msix(struct qed_hwfn *p_hwfn,
return rc;
}
+static int
+qed_mcp_config_vf_msix_ah(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u8 num)
+{
+ u32 resp = 0, param = num, rc_param = 0;
+ int rc;
+
+ rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_PF_VFS_MSIX,
+ param, &resp, &rc_param);
+
+ if (resp != FW_MSG_CODE_DRV_CFG_PF_VFS_MSIX_DONE) {
+ DP_NOTICE(p_hwfn, "MFW failed to set MSI-X for VFs\n");
+ rc = -EINVAL;
+ } else {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "Requested 0x%02x MSI-x interrupts for VFs\n", num);
+ }
+
+ return rc;
+}
+
+int qed_mcp_config_vf_msix(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u8 vf_id, u8 num)
+{
+ if (QED_IS_BB(p_hwfn->cdev))
+ return qed_mcp_config_vf_msix_bb(p_hwfn, p_ptt, vf_id, num);
+ else
+ return qed_mcp_config_vf_msix_ah(p_hwfn, p_ptt, num);
+}
+
int
qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
@@ -2222,6 +2310,95 @@ int qed_mcp_bist_nvm_test_get_image_att(struct qed_hwfn *p_hwfn,
return rc;
}
+static int
+qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_nvm_images image_id,
+ struct qed_nvm_image_att *p_image_att)
+{
+ struct bist_nvm_image_att mfw_image_att;
+ enum nvm_image_type type;
+ u32 num_images, i;
+ int rc;
+
+ /* Translate image_id into MFW definitions */
+ switch (image_id) {
+ case QED_NVM_IMAGE_ISCSI_CFG:
+ type = NVM_TYPE_ISCSI_CFG;
+ break;
+ case QED_NVM_IMAGE_FCOE_CFG:
+ type = NVM_TYPE_FCOE_CFG;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, "Unknown request of image_id %08x\n",
+ image_id);
+ return -EINVAL;
+ }
+
+ /* Learn number of images, then traverse and see if one fits */
+ rc = qed_mcp_bist_nvm_test_get_num_images(p_hwfn, p_ptt, &num_images);
+ if (rc || !num_images)
+ return -EINVAL;
+
+ for (i = 0; i < num_images; i++) {
+ rc = qed_mcp_bist_nvm_test_get_image_att(p_hwfn, p_ptt,
+ &mfw_image_att, i);
+ if (rc)
+ return rc;
+
+ if (type == mfw_image_att.image_type)
+ break;
+ }
+ if (i == num_images) {
+ DP_VERBOSE(p_hwfn, QED_MSG_STORAGE,
+ "Failed to find nvram image of type %08x\n",
+ image_id);
+ return -EINVAL;
+ }
+
+ p_image_att->start_addr = mfw_image_att.nvm_start_addr;
+ p_image_att->length = mfw_image_att.len;
+
+ return 0;
+}
+
+int qed_mcp_get_nvm_image(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_nvm_images image_id,
+ u8 *p_buffer, u32 buffer_len)
+{
+ struct qed_nvm_image_att image_att;
+ int rc;
+
+ memset(p_buffer, 0, buffer_len);
+
+ rc = qed_mcp_get_nvm_image_att(p_hwfn, p_ptt, image_id, &image_att);
+ if (rc)
+ return rc;
+
+ /* Validate sizes - both the image's and the supplied buffer's */
+ if (image_att.length <= 4) {
+ DP_VERBOSE(p_hwfn, QED_MSG_STORAGE,
+ "Image [%d] is too small - only %d bytes\n",
+ image_id, image_att.length);
+ return -EINVAL;
+ }
+
+ /* Each NVM image is suffixed by a CRC; the upper layer has no need for it */
+ image_att.length -= 4;
+
+ if (image_att.length > buffer_len) {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_STORAGE,
+ "Image [%d] is too big - %08x bytes where only %08x are available\n",
+ image_id, image_att.length, buffer_len);
+ return -ENOMEM;
+ }
+
+ return qed_mcp_nvm_read(p_hwfn->cdev, image_att.start_addr,
+ p_buffer, image_att.length);
+}
+
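Callers of qed_mcp_get_nvm_image() must size their buffer for the image length minus the 4-byte CRC suffix the helper strips; an undersized buffer fails with -ENOMEM rather than truncating. A usage sketch (the 4 KB size is illustrative, not a real image size):

	u8 buf[4096];
	int rc;

	rc = qed_mcp_get_nvm_image(p_hwfn, p_ptt, QED_NVM_IMAGE_ISCSI_CFG,
				   buf, sizeof(buf));
	if (rc)	/* -EINVAL: bad or missing image; -ENOMEM: buffer too small */
		return rc;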
static enum resource_id_enum qed_mcp_get_mfw_res_id(enum qed_resources res_id)
{
enum resource_id_enum mfw_res_id = RESOURCE_NUM_INVALID;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
index 2b09b8545236..af03b3651411 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
@@ -256,6 +256,18 @@ int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn,
u32 *p_mfw_ver, u32 *p_running_bundle_id);
/**
+ * @brief Get the MBI version value
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_mbi_ver - A pointer to a variable to be filled with the MBI version.
+ *
+ * @return int - 0 - operation was successful.
+ */
+int qed_mcp_get_mbi_ver(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *p_mbi_ver);
+
+/**
* @brief Get media type value of the port.
*
* @param cdev - qed dev pointer
@@ -418,6 +430,27 @@ int qed_mcp_set_led(struct qed_hwfn *p_hwfn,
*/
int qed_mcp_nvm_read(struct qed_dev *cdev, u32 addr, u8 *p_buf, u32 len);
+struct qed_nvm_image_att {
+ u32 start_addr;
+ u32 length;
+};
+
+/**
+ * @brief Allows reading a whole nvram image
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param image_id - image requested for reading
+ * @param p_buffer - allocated buffer into which to fill data
+ * @param buffer_len - length of the allocated buffer.
+ *
+ * @return 0 iff p_buffer now contains the nvram image.
+ */
+int qed_mcp_get_nvm_image(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_nvm_images image_id,
+ u8 *p_buffer, u32 buffer_len);
+
/**
* @brief Bist register test
*
@@ -482,7 +515,7 @@ int qed_mcp_bist_nvm_test_get_image_att(struct qed_hwfn *p_hwfn,
#define MCP_PF_ID(p_hwfn) MCP_PF_ID_BY_REL(p_hwfn, (p_hwfn)->rel_pf_id)
#define MFW_PORT(_p_hwfn) ((_p_hwfn)->abs_pf_id % \
- ((_p_hwfn)->cdev->num_ports_in_engines * \
+ ((_p_hwfn)->cdev->num_ports_in_engine * \
qed_device_num_engines((_p_hwfn)->cdev)))
struct qed_mcp_info {
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ooo.c b/drivers/net/ethernet/qlogic/qed/qed_ooo.c
index db96670192c7..000636530111 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ooo.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ooo.c
@@ -99,7 +99,7 @@ void qed_ooo_save_history_entry(struct qed_hwfn *p_hwfn,
p_history->head_idx++;
}
-struct qed_ooo_info *qed_ooo_alloc(struct qed_hwfn *p_hwfn)
+int qed_ooo_alloc(struct qed_hwfn *p_hwfn)
{
u16 max_num_archipelagos = 0, cid_base;
struct qed_ooo_info *p_ooo_info;
@@ -109,7 +109,7 @@ struct qed_ooo_info *qed_ooo_alloc(struct qed_hwfn *p_hwfn)
if (p_hwfn->hw_info.personality != QED_PCI_ISCSI) {
DP_NOTICE(p_hwfn,
"Failed to allocate qed_ooo_info: unknown personality\n");
- return NULL;
+ return -EINVAL;
}
max_num_archipelagos = p_hwfn->pf_params.iscsi_pf_params.num_cons;
@@ -119,12 +119,12 @@ struct qed_ooo_info *qed_ooo_alloc(struct qed_hwfn *p_hwfn)
if (!max_num_archipelagos) {
DP_NOTICE(p_hwfn,
"Failed to allocate qed_ooo_info: unknown amount of connections\n");
- return NULL;
+ return -EINVAL;
}
p_ooo_info = kzalloc(sizeof(*p_ooo_info), GFP_KERNEL);
if (!p_ooo_info)
- return NULL;
+ return -ENOMEM;
p_ooo_info->cid_base = cid_base;
p_ooo_info->max_num_archipelagos = max_num_archipelagos;
@@ -164,7 +164,8 @@ struct qed_ooo_info *qed_ooo_alloc(struct qed_hwfn *p_hwfn)
p_ooo_info->ooo_history.num_of_cqes = QED_MAX_NUM_OOO_HISTORY_ENTRIES;
- return p_ooo_info;
+ p_hwfn->p_ooo_info = p_ooo_info;
+ return 0;
no_history_mem:
kfree(p_ooo_info->p_archipelagos_mem);
@@ -172,7 +173,7 @@ no_archipelagos_mem:
kfree(p_ooo_info->p_isles_mem);
no_isles_mem:
kfree(p_ooo_info);
- return NULL;
+ return -ENOMEM;
}
void qed_ooo_release_connection_isles(struct qed_hwfn *p_hwfn,
@@ -249,19 +250,23 @@ void qed_ooo_release_all_isles(struct qed_hwfn *p_hwfn,
&p_ooo_info->free_buffers_list);
}
-void qed_ooo_setup(struct qed_hwfn *p_hwfn, struct qed_ooo_info *p_ooo_info)
+void qed_ooo_setup(struct qed_hwfn *p_hwfn)
{
- qed_ooo_release_all_isles(p_hwfn, p_ooo_info);
- memset(p_ooo_info->ooo_history.p_cqes, 0,
- p_ooo_info->ooo_history.num_of_cqes *
+ qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
+ memset(p_hwfn->p_ooo_info->ooo_history.p_cqes, 0,
+ p_hwfn->p_ooo_info->ooo_history.num_of_cqes *
sizeof(struct ooo_opaque));
- p_ooo_info->ooo_history.head_idx = 0;
+ p_hwfn->p_ooo_info->ooo_history.head_idx = 0;
}
-void qed_ooo_free(struct qed_hwfn *p_hwfn, struct qed_ooo_info *p_ooo_info)
+void qed_ooo_free(struct qed_hwfn *p_hwfn)
{
+ struct qed_ooo_info *p_ooo_info = p_hwfn->p_ooo_info;
struct qed_ooo_buffer *p_buffer;
+ if (!p_ooo_info)
+ return;
+
qed_ooo_release_all_isles(p_hwfn, p_ooo_info);
while (!list_empty(&p_ooo_info->free_buffers_list)) {
p_buffer = list_first_entry(&p_ooo_info->free_buffers_list,
@@ -282,6 +287,7 @@ void qed_ooo_free(struct qed_hwfn *p_hwfn, struct qed_ooo_info *p_ooo_info)
kfree(p_ooo_info->p_archipelagos_mem);
kfree(p_ooo_info->ooo_history.p_cqes);
kfree(p_ooo_info);
+ p_hwfn->p_ooo_info = NULL;
}
void qed_ooo_put_free_buffer(struct qed_hwfn *p_hwfn,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ooo.h b/drivers/net/ethernet/qlogic/qed/qed_ooo.h
index 791ad0f8b759..e8ed40b848f5 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ooo.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_ooo.h
@@ -88,7 +88,11 @@ void qed_ooo_save_history_entry(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info,
struct ooo_opaque *p_cqe);
-struct qed_ooo_info *qed_ooo_alloc(struct qed_hwfn *p_hwfn);
+int qed_ooo_alloc(struct qed_hwfn *p_hwfn);
+
+void qed_ooo_setup(struct qed_hwfn *p_hwfn);
+
+void qed_ooo_free(struct qed_hwfn *p_hwfn);
void qed_ooo_release_connection_isles(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info,
@@ -97,10 +101,6 @@ void qed_ooo_release_connection_isles(struct qed_hwfn *p_hwfn,
void qed_ooo_release_all_isles(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info);
-void qed_ooo_setup(struct qed_hwfn *p_hwfn, struct qed_ooo_info *p_ooo_info);
-
-void qed_ooo_free(struct qed_hwfn *p_hwfn, struct qed_ooo_info *p_ooo_info);
-
void qed_ooo_put_free_buffer(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info,
struct qed_ooo_buffer *p_buffer);
@@ -140,8 +140,14 @@ static inline void qed_ooo_save_history_entry(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info,
struct ooo_opaque *p_cqe) {}
-static inline struct qed_ooo_info *qed_ooo_alloc(
- struct qed_hwfn *p_hwfn) { return NULL; }
+static inline int qed_ooo_alloc(struct qed_hwfn *p_hwfn)
+{
+ return -EINVAL;
+}
+
+static inline void qed_ooo_setup(struct qed_hwfn *p_hwfn) {}
+
+static inline void qed_ooo_free(struct qed_hwfn *p_hwfn) {}
static inline void
qed_ooo_release_connection_isles(struct qed_hwfn *p_hwfn,
@@ -152,12 +158,6 @@ static inline void qed_ooo_release_all_isles(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info)
{}
-static inline void qed_ooo_setup(struct qed_hwfn *p_hwfn,
- struct qed_ooo_info *p_ooo_info) {}
-
-static inline void qed_ooo_free(struct qed_hwfn *p_hwfn,
- struct qed_ooo_info *p_ooo_info) {}
-
static inline void qed_ooo_put_free_buffer(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info,
struct qed_ooo_buffer *p_buffer) {}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ptp.c b/drivers/net/ethernet/qlogic/qed/qed_ptp.c
index 434a164a76ed..5a90d69dc2f8 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ptp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ptp.c
@@ -80,7 +80,7 @@ static int qed_ptp_res_lock(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
/* MFW doesn't support resource locking, first PF on the port
* has lock ownership.
*/
- if (p_hwfn->abs_pf_id < p_hwfn->cdev->num_ports_in_engines)
+ if (p_hwfn->abs_pf_id < p_hwfn->cdev->num_ports_in_engine)
return 0;
DP_INFO(p_hwfn, "PF doesn't have lock ownership\n");
@@ -108,7 +108,7 @@ static int qed_ptp_res_unlock(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
rc = qed_mcp_resc_unlock(p_hwfn, p_ptt, &params);
if (rc == -EINVAL) {
/* MFW doesn't support locking, first PF has lock ownership */
- if (p_hwfn->abs_pf_id < p_hwfn->cdev->num_ports_in_engines) {
+ if (p_hwfn->abs_pf_id < p_hwfn->cdev->num_ports_in_engine) {
rc = 0;
} else {
DP_INFO(p_hwfn, "PF doesn't have lock ownership\n");
diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
new file mode 100644
index 000000000000..6fb99518a61f
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
@@ -0,0 +1,1787 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015-2017 QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include "qed.h"
+#include "qed_cxt.h"
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_init_ops.h"
+#include "qed_int.h"
+#include "qed_ll2.h"
+#include "qed_mcp.h"
+#include "qed_reg_addr.h"
+#include <linux/qed/qed_rdma_if.h>
+#include "qed_rdma.h"
+#include "qed_roce.h"
+#include "qed_sp.h"
+
+
+int qed_rdma_bmap_alloc(struct qed_hwfn *p_hwfn,
+ struct qed_bmap *bmap, u32 max_count, char *name)
+{
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "max_count = %08x\n", max_count);
+
+ bmap->max_count = max_count;
+
+ bmap->bitmap = kcalloc(BITS_TO_LONGS(max_count), sizeof(long),
+ GFP_KERNEL);
+ if (!bmap->bitmap)
+ return -ENOMEM;
+
+ snprintf(bmap->name, QED_RDMA_MAX_BMAP_NAME, "%s", name);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "0\n");
+ return 0;
+}
+
+int qed_rdma_bmap_alloc_id(struct qed_hwfn *p_hwfn,
+ struct qed_bmap *bmap, u32 *id_num)
+{
+ *id_num = find_first_zero_bit(bmap->bitmap, bmap->max_count);
+ if (*id_num >= bmap->max_count)
+ return -EINVAL;
+
+ __set_bit(*id_num, bmap->bitmap);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "%s bitmap: allocated id %d\n",
+ bmap->name, *id_num);
+
+ return 0;
+}
+
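qed_rdma_bmap_alloc_id() does no locking of its own; every allocation site later in this file serializes through the rdma info spinlock. The idiom, condensed:

	u32 id;
	int rc;

	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_hwfn->p_rdma_info->pd_map, &id);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
	if (rc)
		return rc;	/* bitmap exhausted, no free id */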
+void qed_bmap_set_id(struct qed_hwfn *p_hwfn,
+ struct qed_bmap *bmap, u32 id_num)
+{
+ if (id_num >= bmap->max_count)
+ return;
+
+ __set_bit(id_num, bmap->bitmap);
+}
+
+void qed_bmap_release_id(struct qed_hwfn *p_hwfn,
+ struct qed_bmap *bmap, u32 id_num)
+{
+ bool b_acquired;
+
+ if (id_num >= bmap->max_count)
+ return;
+
+ b_acquired = test_and_clear_bit(id_num, bmap->bitmap);
+ if (!b_acquired) {
+ DP_NOTICE(p_hwfn, "%s bitmap: id %d already released\n",
+ bmap->name, id_num);
+ return;
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "%s bitmap: released id %d\n",
+ bmap->name, id_num);
+}
+
+int qed_bmap_test_id(struct qed_hwfn *p_hwfn,
+ struct qed_bmap *bmap, u32 id_num)
+{
+ if (id_num >= bmap->max_count)
+ return -1;
+
+ return test_bit(id_num, bmap->bitmap);
+}
+
+static bool qed_bmap_is_empty(struct qed_bmap *bmap)
+{
+ return bmap->max_count == find_first_bit(bmap->bitmap, bmap->max_count);
+}
+
+u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id)
+{
+ /* First sb id for RoCE is after all the l2 sb */
+ return FEAT_NUM((struct qed_hwfn *)p_hwfn, QED_PF_L2_QUE) + rel_sb_id;
+}
+
+static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_rdma_start_in_params *params)
+{
+ struct qed_rdma_info *p_rdma_info;
+ u32 num_cons, num_tasks;
+ int rc = -ENOMEM;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocating RDMA\n");
+
+ /* Allocate a struct with current pf rdma info */
+ p_rdma_info = kzalloc(sizeof(*p_rdma_info), GFP_KERNEL);
+ if (!p_rdma_info)
+ return rc;
+
+ p_hwfn->p_rdma_info = p_rdma_info;
+ p_rdma_info->proto = PROTOCOLID_ROCE;
+
+ num_cons = qed_cxt_get_proto_cid_count(p_hwfn, p_rdma_info->proto,
+ NULL);
+
+ if (QED_IS_IWARP_PERSONALITY(p_hwfn))
+ p_rdma_info->num_qps = num_cons;
+ else
+ p_rdma_info->num_qps = num_cons / 2; /* 2 cids per qp */
+
+ num_tasks = qed_cxt_get_proto_tid_count(p_hwfn, PROTOCOLID_ROCE);
+
+ /* Each MR uses a single task */
+ p_rdma_info->num_mrs = num_tasks;
+
+ /* Queue zone lines are shared between RoCE and L2 in such a way that
+ * they can be used by each without obstructing the other.
+ */
+ p_rdma_info->queue_zone_base = (u16)RESC_START(p_hwfn, QED_L2_QUEUE);
+ p_rdma_info->max_queue_zones = (u16)RESC_NUM(p_hwfn, QED_L2_QUEUE);
+
+ /* Allocate a struct with device params and fill it */
+ p_rdma_info->dev = kzalloc(sizeof(*p_rdma_info->dev), GFP_KERNEL);
+ if (!p_rdma_info->dev)
+ goto free_rdma_info;
+
+ /* Allocate a struct with port params and fill it */
+ p_rdma_info->port = kzalloc(sizeof(*p_rdma_info->port), GFP_KERNEL);
+ if (!p_rdma_info->port)
+ goto free_rdma_dev;
+
+ /* Allocate bit map for pd's */
+ rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->pd_map, RDMA_MAX_PDS,
+ "PD");
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Failed to allocate pd_map, rc = %d\n",
+ rc);
+ goto free_rdma_port;
+ }
+
+ /* Allocate DPI bitmap */
+ rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->dpi_map,
+ p_hwfn->dpi_count, "DPI");
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Failed to allocate DPI bitmap, rc = %d\n", rc);
+ goto free_pd_map;
+ }
+
+ /* Allocate bitmap for cq's. The maximum number of CQs is bounded to
+ * twice the number of QPs.
+ */
+ rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cq_map,
+ p_rdma_info->num_qps * 2, "CQ");
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Failed to allocate cq bitmap, rc = %d\n", rc);
+ goto free_dpi_map;
+ }
+
+ /* Allocate bitmap for toggle bit for cq icids
+ * We toggle the bit every time we create or resize cq for a given icid.
+ * The maximum number of CQs is bounded to twice the number of QPs.
+ */
+ rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->toggle_bits,
+ p_rdma_info->num_qps * 2, "Toggle");
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Failed to allocate toogle bits, rc = %d\n", rc);
+ goto free_cq_map;
+ }
+
+ /* Allocate bitmap for itids */
+ rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->tid_map,
+ p_rdma_info->num_mrs, "MR");
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Failed to allocate itids bitmaps, rc = %d\n", rc);
+ goto free_toggle_map;
+ }
+
+ /* Allocate bitmap for cids used for qps. */
+ rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cid_map, num_cons,
+ "CID");
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Failed to allocate cid bitmap, rc = %d\n", rc);
+ goto free_tid_map;
+ }
+
+ /* Allocate bitmap for cids used for responders/requesters. */
+ rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->real_cid_map, num_cons,
+ "REAL_CID");
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Failed to allocate real cid bitmap, rc = %d\n", rc);
+ goto free_cid_map;
+ }
+
+ if (QED_IS_IWARP_PERSONALITY(p_hwfn))
+ rc = qed_iwarp_alloc(p_hwfn);
+
+ if (rc)
+ goto free_cid_map;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocation successful\n");
+ return 0;
+
+free_cid_map:
+ kfree(p_rdma_info->cid_map.bitmap);
+free_tid_map:
+ kfree(p_rdma_info->tid_map.bitmap);
+free_toggle_map:
+ kfree(p_rdma_info->toggle_bits.bitmap);
+free_cq_map:
+ kfree(p_rdma_info->cq_map.bitmap);
+free_dpi_map:
+ kfree(p_rdma_info->dpi_map.bitmap);
+free_pd_map:
+ kfree(p_rdma_info->pd_map.bitmap);
+free_rdma_port:
+ kfree(p_rdma_info->port);
+free_rdma_dev:
+ kfree(p_rdma_info->dev);
+free_rdma_info:
+ kfree(p_rdma_info);
+
+ return rc;
+}
+
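qed_rdma_alloc() above uses the kernel's standard goto-unwind pattern: each successful allocation adds one cleanup label, and a later failure jumps to the label that frees everything allocated so far, in reverse order. Reduced to two resources, the shape is:

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a)
		return -ENOMEM;

	b = kzalloc(sizeof(*b), GFP_KERNEL);
	if (!b)
		goto free_a;

	return 0;

free_a:
	kfree(a);
	return -ENOMEM;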
+void qed_rdma_bmap_free(struct qed_hwfn *p_hwfn,
+ struct qed_bmap *bmap, bool check)
+{
+ int weight = bitmap_weight(bmap->bitmap, bmap->max_count);
+ int last_line = bmap->max_count / (64 * 8);
+ int last_item = last_line * 8 +
+ DIV_ROUND_UP(bmap->max_count % (64 * 8), 64);
+ u64 *pmap = (u64 *)bmap->bitmap;
+ int line, item, offset;
+ char str_last_line[200] = { 0 };
+
+ if (!weight || !check)
+ goto end;
+
+ DP_NOTICE(p_hwfn,
+ "%s bitmap not free - size=%d, weight=%d, 512 bits per line\n",
+ bmap->name, bmap->max_count, weight);
+
+ /* print aligned non-zero lines, if any */
+ for (item = 0, line = 0; line < last_line; line++, item += 8)
+ if (bitmap_weight((unsigned long *)&pmap[item], 64 * 8))
+ DP_NOTICE(p_hwfn,
+ "line 0x%04x: 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n",
+ line,
+ pmap[item],
+ pmap[item + 1],
+ pmap[item + 2],
+ pmap[item + 3],
+ pmap[item + 4],
+ pmap[item + 5],
+ pmap[item + 6], pmap[item + 7]);
+
+ /* print last unaligned non-zero line, if any */
+ if ((bmap->max_count % (64 * 8)) &&
+ (bitmap_weight((unsigned long *)&pmap[item],
+ bmap->max_count - item * 64))) {
+ offset = sprintf(str_last_line, "line 0x%04x: ", line);
+ for (; item < last_item; item++)
+ offset += sprintf(str_last_line + offset,
+ "0x%016llx ", pmap[item]);
+ DP_NOTICE(p_hwfn, "%s\n", str_last_line);
+ }
+
+end:
+ kfree(bmap->bitmap);
+ bmap->bitmap = NULL;
+}
+
+static void qed_rdma_resc_free(struct qed_hwfn *p_hwfn)
+{
+ struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
+
+ if (QED_IS_IWARP_PERSONALITY(p_hwfn))
+ qed_iwarp_resc_free(p_hwfn);
+
+ qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->cid_map, 1);
+ qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->pd_map, 1);
+ qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->dpi_map, 1);
+ qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->cq_map, 1);
+ qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->toggle_bits, 0);
+ qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->tid_map, 1);
+
+ kfree(p_rdma_info->port);
+ kfree(p_rdma_info->dev);
+
+ kfree(p_rdma_info);
+}
+
+static void qed_rdma_free(struct qed_hwfn *p_hwfn)
+{
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Freeing RDMA\n");
+
+ qed_rdma_resc_free(p_hwfn);
+}
+
+static void qed_rdma_get_guid(struct qed_hwfn *p_hwfn, u8 *guid)
+{
+ guid[0] = p_hwfn->hw_info.hw_mac_addr[0] ^ 2;
+ guid[1] = p_hwfn->hw_info.hw_mac_addr[1];
+ guid[2] = p_hwfn->hw_info.hw_mac_addr[2];
+ guid[3] = 0xff;
+ guid[4] = 0xfe;
+ guid[5] = p_hwfn->hw_info.hw_mac_addr[3];
+ guid[6] = p_hwfn->hw_info.hw_mac_addr[4];
+ guid[7] = p_hwfn->hw_info.hw_mac_addr[5];
+}
+
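qed_rdma_get_guid() is the standard MAC-to-EUI-64 expansion: flip the universal/local bit in the first octet (the ^ 2) and splice 0xff, 0xfe between the two halves of the MAC. A worked example with an illustrative address:

	/* MAC 02:11:22:33:44:55 maps to GUID 00:11:22:ff:fe:33:44:55;
	 * 0x02 ^ 2 clears the U/L bit, ff:fe fills the middle.
	 */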
+static void qed_rdma_init_events(struct qed_hwfn *p_hwfn,
+ struct qed_rdma_start_in_params *params)
+{
+ struct qed_rdma_events *events;
+
+ events = &p_hwfn->p_rdma_info->events;
+
+ events->unaffiliated_event = params->events->unaffiliated_event;
+ events->affiliated_event = params->events->affiliated_event;
+ events->context = params->events->context;
+}
+
+static void qed_rdma_init_devinfo(struct qed_hwfn *p_hwfn,
+ struct qed_rdma_start_in_params *params)
+{
+ struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;
+ struct qed_dev *cdev = p_hwfn->cdev;
+ u32 pci_status_control;
+ u32 num_qps;
+
+ /* Vendor specific information */
+ dev->vendor_id = cdev->vendor_id;
+ dev->vendor_part_id = cdev->device_id;
+ dev->hw_ver = 0;
+ dev->fw_ver = (FW_MAJOR_VERSION << 24) | (FW_MINOR_VERSION << 16) |
+ (FW_REVISION_VERSION << 8) | (FW_ENGINEERING_VERSION);
+
+ qed_rdma_get_guid(p_hwfn, (u8 *)&dev->sys_image_guid);
+ dev->node_guid = dev->sys_image_guid;
+
+ dev->max_sge = min_t(u32, RDMA_MAX_SGE_PER_SQ_WQE,
+ RDMA_MAX_SGE_PER_RQ_WQE);
+
+ if (cdev->rdma_max_sge)
+ dev->max_sge = min_t(u32, cdev->rdma_max_sge, dev->max_sge);
+
+ dev->max_inline = ROCE_REQ_MAX_INLINE_DATA_SIZE;
+
+ dev->max_inline = (cdev->rdma_max_inline) ?
+ min_t(u32, cdev->rdma_max_inline, dev->max_inline) :
+ dev->max_inline;
+
+ dev->max_wqe = QED_RDMA_MAX_WQE;
+ dev->max_cnq = (u8)FEAT_NUM(p_hwfn, QED_RDMA_CNQ);
+
+ /* The number of QPs may be higher than ROCE_MAX_QPS, because
+ * it is up-aligned to 16 and then to the ILT page size within qed cxt.
+ * This is OK in terms of ILT, but we don't want to configure the FW
+ * above its abilities.
+ */
+ num_qps = ROCE_MAX_QPS;
+ num_qps = min_t(u64, num_qps, p_hwfn->p_rdma_info->num_qps);
+ dev->max_qp = num_qps;
+
+ /* CQs use the same icids that QPs use, hence they are limited by the
+ * number of icids. There are two icids per QP.
+ */
+ dev->max_cq = num_qps * 2;
+
+ /* The number of mrs is smaller by 1 since the first is reserved */
+ dev->max_mr = p_hwfn->p_rdma_info->num_mrs - 1;
+ dev->max_mr_size = QED_RDMA_MAX_MR_SIZE;
+
+ /* The maximum CQE capacity per CQ supported.
+ * The max number of CQEs assumes a two-level PBL:
+ * 8 is the pointer size in bytes,
+ * 32 is the size of a CQ element in bytes.
+ */
+ if (params->cq_mode == QED_RDMA_CQ_MODE_32_BITS)
+ dev->max_cqe = QED_RDMA_MAX_CQE_32_BIT;
+ else
+ dev->max_cqe = QED_RDMA_MAX_CQE_16_BIT;
+
+ dev->max_mw = 0;
+ dev->max_fmr = QED_RDMA_MAX_FMR;
+ dev->max_mr_mw_fmr_pbl = (PAGE_SIZE / 8) * (PAGE_SIZE / 8);
+ dev->max_mr_mw_fmr_size = dev->max_mr_mw_fmr_pbl * PAGE_SIZE;
+ dev->max_pkey = QED_RDMA_MAX_P_KEY;
+
+ dev->max_qp_resp_rd_atomic_resc = RDMA_RING_PAGE_SIZE /
+ (RDMA_RESP_RD_ATOMIC_ELM_SIZE * 2);
+ dev->max_qp_req_rd_atomic_resc = RDMA_RING_PAGE_SIZE /
+ RDMA_REQ_RD_ATOMIC_ELM_SIZE;
+ dev->max_dev_resp_rd_atomic_resc = dev->max_qp_resp_rd_atomic_resc *
+ p_hwfn->p_rdma_info->num_qps;
+ dev->page_size_caps = QED_RDMA_PAGE_SIZE_CAPS;
+ dev->dev_ack_delay = QED_RDMA_ACK_DELAY;
+ dev->max_pd = RDMA_MAX_PDS;
+ dev->max_ah = p_hwfn->p_rdma_info->num_qps;
+ dev->max_stats_queues = (u8)RESC_NUM(p_hwfn, QED_RDMA_STATS_QUEUE);
+
+ /* Set capabilities */
+ dev->dev_caps = 0;
+ SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_RNR_NAK, 1);
+ SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_PORT_ACTIVE_EVENT, 1);
+ SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_PORT_CHANGE_EVENT, 1);
+ SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_RESIZE_CQ, 1);
+ SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_BASE_MEMORY_EXT, 1);
+ SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_BASE_QUEUE_EXT, 1);
+ SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_ZBVA, 1);
+ SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_LOCAL_INV_FENCE, 1);
+
+ /* Check atomic operations support in PCI configuration space. */
+ pci_read_config_dword(cdev->pdev,
+ cdev->pdev->pcie_cap + PCI_EXP_DEVCTL2,
+ &pci_status_control);
+
+ if (pci_status_control & PCI_EXP_DEVCTL2_LTR_EN)
+ SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_ATOMIC_OP, 1);
+
+ if (QED_IS_IWARP_PERSONALITY(p_hwfn))
+ qed_iwarp_init_devinfo(p_hwfn);
+}
+
+static void qed_rdma_init_port(struct qed_hwfn *p_hwfn)
+{
+ struct qed_rdma_port *port = p_hwfn->p_rdma_info->port;
+ struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;
+
+ port->port_state = p_hwfn->mcp_info->link_output.link_up ?
+ QED_RDMA_PORT_UP : QED_RDMA_PORT_DOWN;
+
+ port->max_msg_size = min_t(u64,
+ (dev->max_mr_mw_fmr_size *
+ p_hwfn->cdev->rdma_max_sge),
+ BIT(31));
+
+ port->pkey_bad_counter = 0;
+}
+
+static int qed_rdma_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ int rc = 0;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Initializing HW\n");
+ p_hwfn->b_rdma_enabled_in_prs = false;
+
+ if (QED_IS_IWARP_PERSONALITY(p_hwfn))
+ qed_iwarp_init_hw(p_hwfn, p_ptt);
+ else
+ rc = qed_roce_init_hw(p_hwfn, p_ptt);
+
+ return rc;
+}
+
+static int qed_rdma_start_fw(struct qed_hwfn *p_hwfn,
+ struct qed_rdma_start_in_params *params,
+ struct qed_ptt *p_ptt)
+{
+ struct rdma_init_func_ramrod_data *p_ramrod;
+ struct qed_rdma_cnq_params *p_cnq_pbl_list;
+ struct rdma_init_func_hdr *p_params_header;
+ struct rdma_cnq_params *p_cnq_params;
+ struct qed_sp_init_data init_data;
+ struct qed_spq_entry *p_ent;
+ u32 cnq_id, sb_id;
+ u16 igu_sb_id;
+ int rc;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Starting FW\n");
+
+ /* Save the number of cnqs for the function close ramrod */
+ p_hwfn->p_rdma_info->num_cnqs = params->desired_cnq;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_FUNC_INIT,
+ p_hwfn->p_rdma_info->proto, &init_data);
+ if (rc)
+ return rc;
+
+ if (QED_IS_IWARP_PERSONALITY(p_hwfn))
+ p_ramrod = &p_ent->ramrod.iwarp_init_func.rdma;
+ else
+ p_ramrod = &p_ent->ramrod.roce_init_func.rdma;
+
+ p_params_header = &p_ramrod->params_header;
+ p_params_header->cnq_start_offset = (u8)RESC_START(p_hwfn,
+ QED_RDMA_CNQ_RAM);
+ p_params_header->num_cnqs = params->desired_cnq;
+
+ if (params->cq_mode == QED_RDMA_CQ_MODE_16_BITS)
+ p_params_header->cq_ring_mode = 1;
+ else
+ p_params_header->cq_ring_mode = 0;
+
+ for (cnq_id = 0; cnq_id < params->desired_cnq; cnq_id++) {
+ sb_id = qed_rdma_get_sb_id(p_hwfn, cnq_id);
+ igu_sb_id = qed_get_igu_sb_id(p_hwfn, sb_id);
+ p_ramrod->cnq_params[cnq_id].sb_num = cpu_to_le16(igu_sb_id);
+ p_cnq_params = &p_ramrod->cnq_params[cnq_id];
+ p_cnq_pbl_list = &params->cnq_pbl_list[cnq_id];
+
+ p_cnq_params->sb_index = p_hwfn->pf_params.rdma_pf_params.gl_pi;
+ p_cnq_params->num_pbl_pages = p_cnq_pbl_list->num_pbl_pages;
+
+ DMA_REGPAIR_LE(p_cnq_params->pbl_base_addr,
+ p_cnq_pbl_list->pbl_ptr);
+
+ /* we assume here that cnq_id and qz_offset are the same */
+ p_cnq_params->queue_zone_num =
+ cpu_to_le16(p_hwfn->p_rdma_info->queue_zone_base +
+ cnq_id);
+ }
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int qed_rdma_alloc_tid(void *rdma_cxt, u32 *itid)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ int rc;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID\n");
+
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+ rc = qed_rdma_bmap_alloc_id(p_hwfn,
+ &p_hwfn->p_rdma_info->tid_map, itid);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+ if (rc)
+ goto out;
+
+ rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_TASK, *itid);
+out:
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID - done, rc = %d\n", rc);
+ return rc;
+}
+
+static int qed_rdma_reserve_lkey(struct qed_hwfn *p_hwfn)
+{
+ struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;
+
+ /* The first DPI is reserved for the Kernel */
+ __set_bit(0, p_hwfn->p_rdma_info->dpi_map.bitmap);
+
+ /* Tid 0 will be used as the key for "reserved MR".
+ * The driver should allocate memory for it so it can be loaded but no
+ * ramrod should be passed on it.
+ */
+ qed_rdma_alloc_tid(p_hwfn, &dev->reserved_lkey);
+ if (dev->reserved_lkey != RDMA_RESERVED_LKEY) {
+ DP_NOTICE(p_hwfn,
+ "Reserved lkey should be equal to RDMA_RESERVED_LKEY\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int qed_rdma_setup(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_rdma_start_in_params *params)
+{
+ int rc;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA setup\n");
+
+ spin_lock_init(&p_hwfn->p_rdma_info->lock);
+
+ qed_rdma_init_devinfo(p_hwfn, params);
+ qed_rdma_init_port(p_hwfn);
+ qed_rdma_init_events(p_hwfn, params);
+
+ rc = qed_rdma_reserve_lkey(p_hwfn);
+ if (rc)
+ return rc;
+
+ rc = qed_rdma_init_hw(p_hwfn, p_ptt);
+ if (rc)
+ return rc;
+
+ if (QED_IS_IWARP_PERSONALITY(p_hwfn)) {
+ rc = qed_iwarp_setup(p_hwfn, p_ptt, params);
+ if (rc)
+ return rc;
+ } else {
+ rc = qed_roce_setup(p_hwfn);
+ if (rc)
+ return rc;
+ }
+
+ return qed_rdma_start_fw(p_hwfn, params, p_ptt);
+}
+
+int qed_rdma_stop(void *rdma_cxt)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ struct rdma_close_func_ramrod_data *p_ramrod;
+ struct qed_sp_init_data init_data;
+ struct qed_spq_entry *p_ent;
+ struct qed_ptt *p_ptt;
+ u32 ll2_ethertype_en;
+ int rc = -EBUSY;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA stop\n");
+
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Failed to acquire PTT\n");
+ return rc;
+ }
+
+ /* Disable RoCE search */
+ qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 0);
+ p_hwfn->b_rdma_enabled_in_prs = false;
+
+ qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, 0);
+
+ ll2_ethertype_en = qed_rd(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN);
+
+ qed_wr(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN,
+ (ll2_ethertype_en & 0xFFFE));
+
+ if (QED_IS_IWARP_PERSONALITY(p_hwfn)) {
+ rc = qed_iwarp_stop(p_hwfn, p_ptt);
+ if (rc) {
+ qed_ptt_release(p_hwfn, p_ptt);
+ return rc;
+ }
+ } else {
+ qed_roce_stop(p_hwfn);
+ }
+
+ qed_ptt_release(p_hwfn, p_ptt);
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ /* Stop RoCE */
+ rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_FUNC_CLOSE,
+ p_hwfn->p_rdma_info->proto, &init_data);
+ if (rc)
+ goto out;
+
+ p_ramrod = &p_ent->ramrod.rdma_close_func;
+
+ p_ramrod->num_cnqs = p_hwfn->p_rdma_info->num_cnqs;
+ p_ramrod->cnq_start_offset = (u8)RESC_START(p_hwfn, QED_RDMA_CNQ_RAM);
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+
+out:
+ qed_rdma_free(p_hwfn);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA stop done, rc = %d\n", rc);
+ return rc;
+}
+
+static int qed_rdma_add_user(void *rdma_cxt,
+ struct qed_rdma_add_user_out_params *out_params)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ u32 dpi_start_offset;
+ u32 returned_id = 0;
+ int rc;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Adding User\n");
+
+ /* Allocate DPI */
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+ rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_hwfn->p_rdma_info->dpi_map,
+ &returned_id);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+
+ out_params->dpi = (u16)returned_id;
+
+ /* Calculate the corresponding DPI address */
+ dpi_start_offset = p_hwfn->dpi_start_offset;
+
+ out_params->dpi_addr = (u64)((u8 __iomem *)p_hwfn->doorbells +
+ dpi_start_offset +
+ ((out_params->dpi) * p_hwfn->dpi_size));
+
+ out_params->dpi_phys_addr = p_hwfn->cdev->db_phys_addr +
+ dpi_start_offset +
+ ((out_params->dpi) * p_hwfn->dpi_size);
+
+ out_params->dpi_size = p_hwfn->dpi_size;
+ out_params->wid_count = p_hwfn->wid_count;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Adding user - done, rc = %d\n", rc);
+ return rc;
+}
+
+static struct qed_rdma_port *qed_rdma_query_port(void *rdma_cxt)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ struct qed_rdma_port *p_port = p_hwfn->p_rdma_info->port;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA Query port\n");
+
+ /* Link may have changed */
+ p_port->port_state = p_hwfn->mcp_info->link_output.link_up ?
+ QED_RDMA_PORT_UP : QED_RDMA_PORT_DOWN;
+
+ p_port->link_speed = p_hwfn->mcp_info->link_output.speed;
+
+ p_port->max_msg_size = RDMA_MAX_DATA_SIZE_IN_WQE;
+
+ return p_port;
+}
+
+static struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Query device\n");
+
+ /* Return struct with device parameters */
+ return p_hwfn->p_rdma_info->dev;
+}
+
+static void qed_rdma_free_tid(void *rdma_cxt, u32 itid)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", itid);
+
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+ qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->tid_map, itid);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+}
+
+static void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 qz_offset, u16 prod)
+{
+ struct qed_hwfn *p_hwfn;
+ u16 qz_num;
+ u32 addr;
+
+ p_hwfn = (struct qed_hwfn *)rdma_cxt;
+
+ if (qz_offset > p_hwfn->p_rdma_info->max_queue_zones) {
+ DP_NOTICE(p_hwfn,
+ "queue zone offset %d is too large (max is %d)\n",
+ qz_offset, p_hwfn->p_rdma_info->max_queue_zones);
+ return;
+ }
+
+ qz_num = p_hwfn->p_rdma_info->queue_zone_base + qz_offset;
+ addr = GTT_BAR0_MAP_REG_USDM_RAM +
+ USTORM_COMMON_QUEUE_CONS_OFFSET(qz_num);
+
+ REG_WR16(p_hwfn, addr, prod);
+
+ /* keep prod updates ordered */
+ wmb();
+}
+
+static int qed_fill_rdma_dev_info(struct qed_dev *cdev,
+ struct qed_dev_rdma_info *info)
+{
+ struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+
+ memset(info, 0, sizeof(*info));
+
+ info->rdma_type = QED_IS_ROCE_PERSONALITY(p_hwfn) ?
+ QED_RDMA_TYPE_ROCE : QED_RDMA_TYPE_IWARP;
+
+ info->user_dpm_enabled = (p_hwfn->db_bar_no_edpm == 0);
+
+ qed_fill_dev_info(cdev, &info->common);
+
+ return 0;
+}
+
+static int qed_rdma_get_sb_start(struct qed_dev *cdev)
+{
+ int feat_num;
+
+ if (cdev->num_hwfns > 1)
+ feat_num = FEAT_NUM(QED_LEADING_HWFN(cdev), QED_PF_L2_QUE);
+ else
+ feat_num = FEAT_NUM(QED_LEADING_HWFN(cdev), QED_PF_L2_QUE) *
+ cdev->num_hwfns;
+
+ return feat_num;
+}
+
+static int qed_rdma_get_min_cnq_msix(struct qed_dev *cdev)
+{
+ int n_cnq = FEAT_NUM(QED_LEADING_HWFN(cdev), QED_RDMA_CNQ);
+ int n_msix = cdev->int_params.rdma_msix_cnt;
+
+ return min_t(int, n_cnq, n_msix);
+}
+
+static int qed_rdma_set_int(struct qed_dev *cdev, u16 cnt)
+{
+ int limit = 0;
+
+ /* Mark the fastpath as free/used */
+ cdev->int_params.fp_initialized = cnt ? true : false;
+
+ if (cdev->int_params.out.int_mode != QED_INT_MODE_MSIX) {
+ DP_ERR(cdev,
+ "qed roce supports only MSI-X interrupts (detected %d).\n",
+ cdev->int_params.out.int_mode);
+ return -EINVAL;
+ } else if (cdev->int_params.fp_msix_cnt) {
+ limit = cdev->int_params.rdma_msix_cnt;
+ }
+
+ if (!limit)
+ return -ENOMEM;
+
+ return min_t(int, cnt, limit);
+}
+
+static int qed_rdma_get_int(struct qed_dev *cdev, struct qed_int_info *info)
+{
+ memset(info, 0, sizeof(*info));
+
+ if (!cdev->int_params.fp_initialized) {
+ DP_INFO(cdev,
+ "Protocol driver requested interrupt information, but its support is not yet configured\n");
+ return -EINVAL;
+ }
+
+ if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
+ int msix_base = cdev->int_params.rdma_msix_base;
+
+ info->msix_cnt = cdev->int_params.rdma_msix_cnt;
+ info->msix = &cdev->int_params.msix_table[msix_base];
+
+ DP_VERBOSE(cdev, QED_MSG_RDMA, "msix_cnt = %d msix_base=%d\n",
+ info->msix_cnt, msix_base);
+ }
+
+ return 0;
+}
+
+static int qed_rdma_alloc_pd(void *rdma_cxt, u16 *pd)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ u32 returned_id;
+ int rc;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc PD\n");
+
+ /* Allocates an unused protection domain */
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+ rc = qed_rdma_bmap_alloc_id(p_hwfn,
+ &p_hwfn->p_rdma_info->pd_map, &returned_id);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+
+ *pd = (u16)returned_id;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc PD - done, rc = %d\n", rc);
+ return rc;
+}
+
+static void qed_rdma_free_pd(void *rdma_cxt, u16 pd)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "pd = %08x\n", pd);
+
+ /* Returns a previously allocated protection domain for reuse */
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+ qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->pd_map, pd);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+}
+
+static enum qed_rdma_toggle_bit
+qed_rdma_toggle_bit_create_resize_cq(struct qed_hwfn *p_hwfn, u16 icid)
+{
+ struct qed_rdma_info *p_info = p_hwfn->p_rdma_info;
+ enum qed_rdma_toggle_bit toggle_bit;
+ u32 bmap_id;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", icid);
+
+ /* The function toggles the bit that is related to a given icid
+ * and returns the new toggle bit's value.
+ */
+ bmap_id = icid - qed_cxt_get_proto_cid_start(p_hwfn, p_info->proto);
+
+ spin_lock_bh(&p_info->lock);
+ toggle_bit = !test_and_change_bit(bmap_id,
+ p_info->toggle_bits.bitmap);
+ spin_unlock_bh(&p_info->lock);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QED_RDMA_TOGGLE_BIT_= %d\n",
+ toggle_bit);
+
+ return toggle_bit;
+}
+
+static int qed_rdma_create_cq(void *rdma_cxt,
+ struct qed_rdma_create_cq_in_params *params,
+ u16 *icid)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ struct qed_rdma_info *p_info = p_hwfn->p_rdma_info;
+ struct rdma_create_cq_ramrod_data *p_ramrod;
+ enum qed_rdma_toggle_bit toggle_bit;
+ struct qed_sp_init_data init_data;
+ struct qed_spq_entry *p_ent;
+ u32 returned_id, start_cid;
+ int rc;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "cq_handle = %08x%08x\n",
+ params->cq_handle_hi, params->cq_handle_lo);
+
+ /* Allocate icid */
+ spin_lock_bh(&p_info->lock);
+ rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_info->cq_map, &returned_id);
+ spin_unlock_bh(&p_info->lock);
+
+ if (rc) {
+ DP_NOTICE(p_hwfn, "Can't create CQ, rc = %d\n", rc);
+ return rc;
+ }
+
+ start_cid = qed_cxt_get_proto_cid_start(p_hwfn,
+ p_info->proto);
+ *icid = returned_id + start_cid;
+
+ /* Check if icid requires a page allocation */
+ rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, *icid);
+ if (rc)
+ goto err;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = *icid;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ /* Send create CQ ramrod */
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ RDMA_RAMROD_CREATE_CQ,
+ p_info->proto, &init_data);
+ if (rc)
+ goto err;
+
+ p_ramrod = &p_ent->ramrod.rdma_create_cq;
+
+ p_ramrod->cq_handle.hi = cpu_to_le32(params->cq_handle_hi);
+ p_ramrod->cq_handle.lo = cpu_to_le32(params->cq_handle_lo);
+ p_ramrod->dpi = cpu_to_le16(params->dpi);
+ p_ramrod->is_two_level_pbl = params->pbl_two_level;
+ p_ramrod->max_cqes = cpu_to_le32(params->cq_size);
+ DMA_REGPAIR_LE(p_ramrod->pbl_addr, params->pbl_ptr);
+ p_ramrod->pbl_num_pages = cpu_to_le16(params->pbl_num_pages);
+ p_ramrod->cnq_id = (u8)RESC_START(p_hwfn, QED_RDMA_CNQ_RAM) +
+ params->cnq_id;
+ p_ramrod->int_timeout = params->int_timeout;
+
+ /* toggle the bit for every resize or create cq for a given icid */
+ toggle_bit = qed_rdma_toggle_bit_create_resize_cq(p_hwfn, *icid);
+
+ p_ramrod->toggle_bit = toggle_bit;
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+ if (rc) {
+ /* restore toggle bit */
+ qed_rdma_toggle_bit_create_resize_cq(p_hwfn, *icid);
+ goto err;
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Created CQ, rc = %d\n", rc);
+ return rc;
+
+err:
+ /* release allocated icid */
+ spin_lock_bh(&p_info->lock);
+ qed_bmap_release_id(p_hwfn, &p_info->cq_map, returned_id);
+ spin_unlock_bh(&p_info->lock);
+ DP_NOTICE(p_hwfn, "Create CQ failed, rc = %d\n", rc);
+
+ return rc;
+}
+
+static int
+qed_rdma_destroy_cq(void *rdma_cxt,
+ struct qed_rdma_destroy_cq_in_params *in_params,
+ struct qed_rdma_destroy_cq_out_params *out_params)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ struct rdma_destroy_cq_output_params *p_ramrod_res;
+ struct rdma_destroy_cq_ramrod_data *p_ramrod;
+ struct qed_sp_init_data init_data;
+ struct qed_spq_entry *p_ent;
+ dma_addr_t ramrod_res_phys;
+ enum protocol_type proto;
+ int rc = -ENOMEM;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", in_params->icid);
+
+ p_ramrod_res =
+ (struct rdma_destroy_cq_output_params *)
+ dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(struct rdma_destroy_cq_output_params),
+ &ramrod_res_phys, GFP_KERNEL);
+ if (!p_ramrod_res) {
+ DP_NOTICE(p_hwfn,
+ "qed destroy cq failed: cannot allocate memory (ramrod)\n");
+ return rc;
+ }
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = in_params->icid;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+ proto = p_hwfn->p_rdma_info->proto;
+ /* Send destroy CQ ramrod */
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ RDMA_RAMROD_DESTROY_CQ,
+ proto, &init_data);
+ if (rc)
+ goto err;
+
+ p_ramrod = &p_ent->ramrod.rdma_destroy_cq;
+ DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+ if (rc)
+ goto err;
+
+ out_params->num_cq_notif = le16_to_cpu(p_ramrod_res->cnq_num);
+
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(struct rdma_destroy_cq_output_params),
+ p_ramrod_res, ramrod_res_phys);
+
+ /* Free icid */
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+
+ qed_bmap_release_id(p_hwfn,
+ &p_hwfn->p_rdma_info->cq_map,
+ (in_params->icid -
+ qed_cxt_get_proto_cid_start(p_hwfn, proto)));
+
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Destroyed CQ, rc = %d\n", rc);
+ return rc;
+
+err: dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(struct rdma_destroy_cq_output_params),
+ p_ramrod_res, ramrod_res_phys);
+
+ return rc;
+}
+
+void qed_rdma_set_fw_mac(u16 *p_fw_mac, u8 *p_qed_mac)
+{
+ p_fw_mac[0] = cpu_to_le16((p_qed_mac[0] << 8) + p_qed_mac[1]);
+ p_fw_mac[1] = cpu_to_le16((p_qed_mac[2] << 8) + p_qed_mac[3]);
+ p_fw_mac[2] = cpu_to_le16((p_qed_mac[4] << 8) + p_qed_mac[5]);
+}
+
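qed_rdma_set_fw_mac() packs the six MAC bytes pairwise, big-endian within each pair, into three little-endian 16-bit words. For an illustrative MAC aa:bb:cc:dd:ee:ff:

	/* p_fw_mac[0] = cpu_to_le16(0xaabb);
	 * p_fw_mac[1] = cpu_to_le16(0xccdd);
	 * p_fw_mac[2] = cpu_to_le16(0xeeff);
	 */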
+static int qed_rdma_query_qp(void *rdma_cxt,
+ struct qed_rdma_qp *qp,
+ struct qed_rdma_query_qp_out_params *out_params)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ int rc = 0;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
+
+ /* The following fields are filled in from qp and not FW as they can't
+ * be modified by FW
+ */
+ out_params->mtu = qp->mtu;
+ out_params->dest_qp = qp->dest_qp;
+ out_params->incoming_atomic_en = qp->incoming_atomic_en;
+ out_params->e2e_flow_control_en = qp->e2e_flow_control_en;
+ out_params->incoming_rdma_read_en = qp->incoming_rdma_read_en;
+ out_params->incoming_rdma_write_en = qp->incoming_rdma_write_en;
+ out_params->dgid = qp->dgid;
+ out_params->flow_label = qp->flow_label;
+ out_params->hop_limit_ttl = qp->hop_limit_ttl;
+ out_params->traffic_class_tos = qp->traffic_class_tos;
+ out_params->timeout = qp->ack_timeout;
+ out_params->rnr_retry = qp->rnr_retry_cnt;
+ out_params->retry_cnt = qp->retry_cnt;
+ out_params->min_rnr_nak_timer = qp->min_rnr_nak_timer;
+ out_params->pkey_index = 0;
+ out_params->max_rd_atomic = qp->max_rd_atomic_req;
+ out_params->max_dest_rd_atomic = qp->max_rd_atomic_resp;
+ out_params->sqd_async = qp->sqd_async;
+
+ if (QED_IS_IWARP_PERSONALITY(p_hwfn))
+ qed_iwarp_query_qp(qp, out_params);
+ else
+ rc = qed_roce_query_qp(p_hwfn, qp, out_params);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Query QP, rc = %d\n", rc);
+ return rc;
+}
+
+static int qed_rdma_destroy_qp(void *rdma_cxt, struct qed_rdma_qp *qp)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ int rc = 0;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
+
+ if (QED_IS_IWARP_PERSONALITY(p_hwfn))
+ rc = qed_iwarp_destroy_qp(p_hwfn, qp);
+ else
+ rc = qed_roce_destroy_qp(p_hwfn, qp);
+
+ /* free qp params struct */
+ kfree(qp);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP destroyed\n");
+ return rc;
+}
+
+static struct qed_rdma_qp *
+qed_rdma_create_qp(void *rdma_cxt,
+ struct qed_rdma_create_qp_in_params *in_params,
+ struct qed_rdma_create_qp_out_params *out_params)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ struct qed_rdma_qp *qp;
+ u8 max_stats_queues;
+ int rc;
+
+ if (!rdma_cxt || !in_params || !out_params || !p_hwfn->p_rdma_info) {
+ DP_ERR(p_hwfn->cdev,
+ "qed roce create qp failed due to NULL entry (rdma_cxt=%p, in=%p, out=%p, roce_info=?\n",
+ rdma_cxt, in_params, out_params);
+ return NULL;
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "qed rdma create qp called with qp_handle = %08x%08x\n",
+ in_params->qp_handle_hi, in_params->qp_handle_lo);
+
+ /* Some sanity checks... */
+ max_stats_queues = p_hwfn->p_rdma_info->dev->max_stats_queues;
+ if (in_params->stats_queue >= max_stats_queues) {
+ DP_ERR(p_hwfn->cdev,
+ "qed rdma create qp failed due to invalid statistics queue %d. maximum is %d\n",
+ in_params->stats_queue, max_stats_queues);
+ return NULL;
+ }
+
+ if (QED_IS_IWARP_PERSONALITY(p_hwfn)) {
+ if (in_params->sq_num_pages * sizeof(struct regpair) >
+ IWARP_SHARED_QUEUE_PAGE_SQ_PBL_MAX_SIZE) {
+ DP_NOTICE(p_hwfn->cdev,
+ "Sq num pages: %d exceeds maximum\n",
+ in_params->sq_num_pages);
+ return NULL;
+ }
+ if (in_params->rq_num_pages * sizeof(struct regpair) >
+ IWARP_SHARED_QUEUE_PAGE_RQ_PBL_MAX_SIZE) {
+ DP_NOTICE(p_hwfn->cdev,
+ "Rq num pages: %d exceeds maximum\n",
+ in_params->rq_num_pages);
+ return NULL;
+ }
+ }
+
+ qp = kzalloc(sizeof(*qp), GFP_KERNEL);
+ if (!qp)
+ return NULL;
+
+ qp->cur_state = QED_ROCE_QP_STATE_RESET;
+ qp->qp_handle.hi = cpu_to_le32(in_params->qp_handle_hi);
+ qp->qp_handle.lo = cpu_to_le32(in_params->qp_handle_lo);
+ qp->qp_handle_async.hi = cpu_to_le32(in_params->qp_handle_async_hi);
+ qp->qp_handle_async.lo = cpu_to_le32(in_params->qp_handle_async_lo);
+ qp->use_srq = in_params->use_srq;
+ qp->signal_all = in_params->signal_all;
+ qp->fmr_and_reserved_lkey = in_params->fmr_and_reserved_lkey;
+ qp->pd = in_params->pd;
+ qp->dpi = in_params->dpi;
+ qp->sq_cq_id = in_params->sq_cq_id;
+ qp->sq_num_pages = in_params->sq_num_pages;
+ qp->sq_pbl_ptr = in_params->sq_pbl_ptr;
+ qp->rq_cq_id = in_params->rq_cq_id;
+ qp->rq_num_pages = in_params->rq_num_pages;
+ qp->rq_pbl_ptr = in_params->rq_pbl_ptr;
+ qp->srq_id = in_params->srq_id;
+ qp->req_offloaded = false;
+ qp->resp_offloaded = false;
+ qp->e2e_flow_control_en = qp->use_srq ? false : true;
+ qp->stats_queue = in_params->stats_queue;
+
+ if (QED_IS_IWARP_PERSONALITY(p_hwfn)) {
+ rc = qed_iwarp_create_qp(p_hwfn, qp, out_params);
+ qp->qpid = qp->icid;
+ } else {
+ rc = qed_roce_alloc_cid(p_hwfn, &qp->icid);
+ qp->qpid = ((0xFF << 16) | qp->icid);
+ }
+
+ if (rc) {
+ kfree(qp);
+ return NULL;
+ }
+
+ out_params->icid = qp->icid;
+ out_params->qp_id = qp->qpid;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Create QP, rc = %d\n", rc);
+ return qp;
+}
+
+static int qed_rdma_modify_qp(void *rdma_cxt,
+ struct qed_rdma_qp *qp,
+ struct qed_rdma_modify_qp_in_params *params)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ enum qed_roce_qp_state prev_state;
+ int rc = 0;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x params->new_state=%d\n",
+ qp->icid, params->new_state);
+
+ if (GET_FIELD(params->modify_flags,
+ QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN)) {
+ qp->incoming_rdma_read_en = params->incoming_rdma_read_en;
+ qp->incoming_rdma_write_en = params->incoming_rdma_write_en;
+ qp->incoming_atomic_en = params->incoming_atomic_en;
+ }
+
+ /* Update QP structure with the updated values */
+ if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_ROCE_MODE))
+ qp->roce_mode = params->roce_mode;
+ if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY))
+ qp->pkey = params->pkey;
+ if (GET_FIELD(params->modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN))
+ qp->e2e_flow_control_en = params->e2e_flow_control_en;
+ if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_DEST_QP))
+ qp->dest_qp = params->dest_qp;
+ if (GET_FIELD(params->modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR)) {
+ /* Indicates that the following parameters have changed:
+ * Traffic class, flow label, hop limit, source GID,
+ * destination GID, loopback indicator
+ */
+ qp->traffic_class_tos = params->traffic_class_tos;
+ qp->flow_label = params->flow_label;
+ qp->hop_limit_ttl = params->hop_limit_ttl;
+
+ qp->sgid = params->sgid;
+ qp->dgid = params->dgid;
+ qp->udp_src_port = 0;
+ qp->vlan_id = params->vlan_id;
+ qp->mtu = params->mtu;
+ qp->lb_indication = params->lb_indication;
+ memcpy((u8 *)&qp->remote_mac_addr[0],
+ (u8 *)&params->remote_mac_addr[0], ETH_ALEN);
+ if (params->use_local_mac) {
+ memcpy((u8 *)&qp->local_mac_addr[0],
+ (u8 *)&params->local_mac_addr[0], ETH_ALEN);
+ } else {
+ memcpy((u8 *)&qp->local_mac_addr[0],
+ (u8 *)&p_hwfn->hw_info.hw_mac_addr, ETH_ALEN);
+ }
+ }
+ if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_RQ_PSN))
+ qp->rq_psn = params->rq_psn;
+ if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_SQ_PSN))
+ qp->sq_psn = params->sq_psn;
+ if (GET_FIELD(params->modify_flags,
+ QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ))
+ qp->max_rd_atomic_req = params->max_rd_atomic_req;
+ if (GET_FIELD(params->modify_flags,
+ QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP))
+ qp->max_rd_atomic_resp = params->max_rd_atomic_resp;
+ if (GET_FIELD(params->modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT))
+ qp->ack_timeout = params->ack_timeout;
+ if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_RETRY_CNT))
+ qp->retry_cnt = params->retry_cnt;
+ if (GET_FIELD(params->modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT))
+ qp->rnr_retry_cnt = params->rnr_retry_cnt;
+ if (GET_FIELD(params->modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER))
+ qp->min_rnr_nak_timer = params->min_rnr_nak_timer;
+
+ qp->sqd_async = params->sqd_async;
+
+ prev_state = qp->cur_state;
+ if (GET_FIELD(params->modify_flags,
+ QED_RDMA_MODIFY_QP_VALID_NEW_STATE)) {
+ qp->cur_state = params->new_state;
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "qp->cur_state=%d\n",
+ qp->cur_state);
+ }
+
+ if (QED_IS_IWARP_PERSONALITY(p_hwfn)) {
+ enum qed_iwarp_qp_state new_state =
+ qed_roce2iwarp_state(qp->cur_state);
+
+ rc = qed_iwarp_modify_qp(p_hwfn, qp, new_state, 0);
+ } else {
+ rc = qed_roce_modify_qp(p_hwfn, qp, prev_state, params);
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify QP, rc = %d\n", rc);
+ return rc;
+}
+
+static int
+qed_rdma_register_tid(void *rdma_cxt,
+ struct qed_rdma_register_tid_in_params *params)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ struct rdma_register_tid_ramrod_data *p_ramrod;
+ struct qed_sp_init_data init_data;
+ struct qed_spq_entry *p_ent;
+ enum rdma_tid_type tid_type;
+ u8 fw_return_code;
+ int rc;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", params->itid);
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_REGISTER_MR,
+ p_hwfn->p_rdma_info->proto, &init_data);
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
+ return rc;
+ }
+
+ if (p_hwfn->p_rdma_info->last_tid < params->itid)
+ p_hwfn->p_rdma_info->last_tid = params->itid;
+
+ p_ramrod = &p_ent->ramrod.rdma_register_tid;
+
+ p_ramrod->flags = 0;
+ SET_FIELD(p_ramrod->flags,
+ RDMA_REGISTER_TID_RAMROD_DATA_TWO_LEVEL_PBL,
+ params->pbl_two_level);
+
+ SET_FIELD(p_ramrod->flags,
+ RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED, params->zbva);
+
+ SET_FIELD(p_ramrod->flags,
+ RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR, params->phy_mr);
+
+ /* Don't initialize D/C field, as it may override other bits. */
+ if (!(params->tid_type == QED_RDMA_TID_FMR) && !(params->dma_mr))
+ SET_FIELD(p_ramrod->flags,
+ RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG,
+ params->page_size_log - 12);
+
+ SET_FIELD(p_ramrod->flags,
+ RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ,
+ params->remote_read);
+
+ SET_FIELD(p_ramrod->flags,
+ RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE,
+ params->remote_write);
+
+ SET_FIELD(p_ramrod->flags,
+ RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_ATOMIC,
+ params->remote_atomic);
+
+ SET_FIELD(p_ramrod->flags,
+ RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE,
+ params->local_write);
+
+ SET_FIELD(p_ramrod->flags,
+ RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ, params->local_read);
+
+ SET_FIELD(p_ramrod->flags,
+ RDMA_REGISTER_TID_RAMROD_DATA_ENABLE_MW_BIND,
+ params->mw_bind);
+
+ SET_FIELD(p_ramrod->flags1,
+ RDMA_REGISTER_TID_RAMROD_DATA_PBL_PAGE_SIZE_LOG,
+ params->pbl_page_size_log - 12);
+
+ SET_FIELD(p_ramrod->flags2,
+ RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR, params->dma_mr);
+
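+	/* Translate the API TID type into the FW ramrod encoding */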
+ switch (params->tid_type) {
+ case QED_RDMA_TID_REGISTERED_MR:
+ tid_type = RDMA_TID_REGISTERED_MR;
+ break;
+ case QED_RDMA_TID_FMR:
+ tid_type = RDMA_TID_FMR;
+ break;
+ case QED_RDMA_TID_MW_TYPE1:
+ tid_type = RDMA_TID_MW_TYPE1;
+ break;
+ case QED_RDMA_TID_MW_TYPE2A:
+ tid_type = RDMA_TID_MW_TYPE2A;
+ break;
+ default:
+ rc = -EINVAL;
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
+ return rc;
+ }
+ SET_FIELD(p_ramrod->flags1,
+ RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE, tid_type);
+
+ p_ramrod->itid = cpu_to_le32(params->itid);
+ p_ramrod->key = params->key;
+ p_ramrod->pd = cpu_to_le16(params->pd);
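+	/* Split the MR length: bits above 32 in length_hi, low dword in length_lo */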
+ p_ramrod->length_hi = (u8)(params->length >> 32);
+ p_ramrod->length_lo = DMA_LO_LE(params->length);
+ if (params->zbva) {
+		/* Lower 32 bits of the registered MR address.
+		 * For a zero-based MR this holds the FBO (first byte offset).
+		 */
+ p_ramrod->va.hi = 0;
+ p_ramrod->va.lo = cpu_to_le32(params->fbo);
+ } else {
+ DMA_REGPAIR_LE(p_ramrod->va, params->vaddr);
+ }
+ DMA_REGPAIR_LE(p_ramrod->pbl_base, params->pbl_ptr);
+
+	/* DIF (data-integrity) configuration */
+ if (params->dif_enabled) {
+ SET_FIELD(p_ramrod->flags2,
+ RDMA_REGISTER_TID_RAMROD_DATA_DIF_ON_HOST_FLG, 1);
+ DMA_REGPAIR_LE(p_ramrod->dif_error_addr,
+ params->dif_error_addr);
+ DMA_REGPAIR_LE(p_ramrod->dif_runt_addr, params->dif_runt_addr);
+ }
+
+ rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
+ if (rc)
+ return rc;
+
+ if (fw_return_code != RDMA_RETURN_OK) {
+ DP_NOTICE(p_hwfn, "fw_return_code = %d\n", fw_return_code);
+ return -EINVAL;
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Register TID, rc = %d\n", rc);
+ return rc;
+}
+
+static int qed_rdma_deregister_tid(void *rdma_cxt, u32 itid)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ struct rdma_deregister_tid_ramrod_data *p_ramrod;
+ struct qed_sp_init_data init_data;
+ struct qed_spq_entry *p_ent;
+ struct qed_ptt *p_ptt;
+ u8 fw_return_code;
+ int rc;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", itid);
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_DEREGISTER_MR,
+ p_hwfn->p_rdma_info->proto, &init_data);
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
+ return rc;
+ }
+
+ p_ramrod = &p_ent->ramrod.rdma_deregister_tid;
+ p_ramrod->itid = cpu_to_le32(itid);
+
+ rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
+ return rc;
+ }
+
+ if (fw_return_code == RDMA_RETURN_DEREGISTER_MR_BAD_STATE_ERR) {
+ DP_NOTICE(p_hwfn, "fw_return_code = %d\n", fw_return_code);
+ return -EINVAL;
+ } else if (fw_return_code == RDMA_RETURN_NIG_DRAIN_REQ) {
+		/* Bit indicating that the TID is in use and a NIG drain is
+		 * required before sending the ramrod again.
+		 */
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt) {
+ rc = -EBUSY;
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Failed to acquire PTT\n");
+ return rc;
+ }
+
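+		/* Ask the management FW to drain the NIG before retrying */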
+ rc = qed_mcp_drain(p_hwfn, p_ptt);
+ if (rc) {
+ qed_ptt_release(p_hwfn, p_ptt);
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Drain failed\n");
+ return rc;
+ }
+
+ qed_ptt_release(p_hwfn, p_ptt);
+
+ /* Resend the ramrod */
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ RDMA_RAMROD_DEREGISTER_MR,
+ p_hwfn->p_rdma_info->proto,
+ &init_data);
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Failed to init sp-element\n");
+ return rc;
+ }
+
+ rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Ramrod failed\n");
+ return rc;
+ }
+
+ if (fw_return_code != RDMA_RETURN_OK) {
+ DP_NOTICE(p_hwfn, "fw_return_code = %d\n",
+ fw_return_code);
+			/* rc is 0 at this point; don't mask the FW failure */
+			return -EINVAL;
+ }
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "De-registered TID, rc = %d\n", rc);
+ return rc;
+}
+
+static void *qed_rdma_get_rdma_ctx(struct qed_dev *cdev)
+{
+ return QED_LEADING_HWFN(cdev);
+}
+
+bool qed_rdma_allocated_qps(struct qed_hwfn *p_hwfn)
+{
+ bool result;
+
+	/* If rdma info has not been allocated, there are naturally no QPs */
+ if (!p_hwfn->p_rdma_info)
+ return false;
+
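+	/* cid_map tracks QP cids; any set bit means at least one QP exists */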
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+ if (!p_hwfn->p_rdma_info->cid_map.bitmap)
+ result = false;
+ else
+ result = !qed_bmap_is_empty(&p_hwfn->p_rdma_info->cid_map);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+ return result;
+}
+
+void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ u32 val;
+
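+	/* EDPM stays enabled only while neither DCBX nor the doorbell BAR forbid it */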
+ val = (p_hwfn->dcbx_no_edpm || p_hwfn->db_bar_no_edpm) ? 0 : 1;
+
+ qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_DPM_ENABLE, val);
+ DP_VERBOSE(p_hwfn, (QED_MSG_DCB | QED_MSG_RDMA),
+ "Changing DPM_EN state to %d (DCBX=%d, DB_BAR=%d)\n",
+ val, p_hwfn->dcbx_no_edpm, p_hwfn->db_bar_no_edpm);
+}
+
+void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
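+	/* The doorbell BAR rules out EDPM; latch that and apply the new state */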
+ p_hwfn->db_bar_no_edpm = true;
+
+ qed_rdma_dpm_conf(p_hwfn, p_ptt);
+}
+
+static int qed_rdma_start(void *rdma_cxt,
+ struct qed_rdma_start_in_params *params)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ struct qed_ptt *p_ptt;
+ int rc = -EBUSY;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "desired_cnq = %08x\n", params->desired_cnq);
+
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ goto err;
+
+ rc = qed_rdma_alloc(p_hwfn, p_ptt, params);
+ if (rc)
+ goto err1;
+
+ rc = qed_rdma_setup(p_hwfn, p_ptt, params);
+ if (rc)
+ goto err2;
+
+ qed_ptt_release(p_hwfn, p_ptt);
+
+ return rc;
+
+err2:
+ qed_rdma_free(p_hwfn);
+err1:
+ qed_ptt_release(p_hwfn, p_ptt);
+err:
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA start - error, rc = %d\n", rc);
+ return rc;
+}
+
+static int qed_rdma_init(struct qed_dev *cdev,
+ struct qed_rdma_start_in_params *params)
+{
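+	/* RDMA is driven through the leading hwfn */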
+ return qed_rdma_start(QED_LEADING_HWFN(cdev), params);
+}
+
+static void qed_rdma_remove_user(void *rdma_cxt, u16 dpi)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "dpi = %08x\n", dpi);
+
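+	/* Return the user's DPI to the free pool */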
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+ qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->dpi_map, dpi);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+}
+
+static int qed_roce_ll2_set_mac_filter(struct qed_dev *cdev,
+ u8 *old_mac_address,
+ u8 *new_mac_address)
+{
+ struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_ptt *p_ptt;
+ int rc = 0;
+
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt) {
+ DP_ERR(cdev,
+ "qed roce ll2 mac filter set: failed to acquire PTT\n");
+ return -EINVAL;
+ }
+
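+	/* Replace the LLH filter: drop the old MAC, if given, then add the new */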
+ if (old_mac_address)
+ qed_llh_remove_mac_filter(p_hwfn, p_ptt, old_mac_address);
+ if (new_mac_address)
+ rc = qed_llh_add_mac_filter(p_hwfn, p_ptt, new_mac_address);
+
+ qed_ptt_release(p_hwfn, p_ptt);
+
+ if (rc)
+ DP_ERR(cdev,
+ "qed roce ll2 mac filter set: failed to add MAC filter\n");
+
+ return rc;
+}
+
+static const struct qed_rdma_ops qed_rdma_ops_pass = {
+ .common = &qed_common_ops_pass,
+ .fill_dev_info = &qed_fill_rdma_dev_info,
+ .rdma_get_rdma_ctx = &qed_rdma_get_rdma_ctx,
+ .rdma_init = &qed_rdma_init,
+ .rdma_add_user = &qed_rdma_add_user,
+ .rdma_remove_user = &qed_rdma_remove_user,
+ .rdma_stop = &qed_rdma_stop,
+ .rdma_query_port = &qed_rdma_query_port,
+ .rdma_query_device = &qed_rdma_query_device,
+ .rdma_get_start_sb = &qed_rdma_get_sb_start,
+ .rdma_get_rdma_int = &qed_rdma_get_int,
+ .rdma_set_rdma_int = &qed_rdma_set_int,
+ .rdma_get_min_cnq_msix = &qed_rdma_get_min_cnq_msix,
+ .rdma_cnq_prod_update = &qed_rdma_cnq_prod_update,
+ .rdma_alloc_pd = &qed_rdma_alloc_pd,
+ .rdma_dealloc_pd = &qed_rdma_free_pd,
+ .rdma_create_cq = &qed_rdma_create_cq,
+ .rdma_destroy_cq = &qed_rdma_destroy_cq,
+ .rdma_create_qp = &qed_rdma_create_qp,
+ .rdma_modify_qp = &qed_rdma_modify_qp,
+ .rdma_query_qp = &qed_rdma_query_qp,
+ .rdma_destroy_qp = &qed_rdma_destroy_qp,
+ .rdma_alloc_tid = &qed_rdma_alloc_tid,
+ .rdma_free_tid = &qed_rdma_free_tid,
+ .rdma_register_tid = &qed_rdma_register_tid,
+ .rdma_deregister_tid = &qed_rdma_deregister_tid,
+ .ll2_acquire_connection = &qed_ll2_acquire_connection,
+ .ll2_establish_connection = &qed_ll2_establish_connection,
+ .ll2_terminate_connection = &qed_ll2_terminate_connection,
+ .ll2_release_connection = &qed_ll2_release_connection,
+ .ll2_post_rx_buffer = &qed_ll2_post_rx_buffer,
+ .ll2_prepare_tx_packet = &qed_ll2_prepare_tx_packet,
+ .ll2_set_fragment_of_tx_packet = &qed_ll2_set_fragment_of_tx_packet,
+ .ll2_set_mac_filter = &qed_roce_ll2_set_mac_filter,
+ .ll2_get_stats = &qed_ll2_get_stats,
+ .iwarp_connect = &qed_iwarp_connect,
+ .iwarp_create_listen = &qed_iwarp_create_listen,
+ .iwarp_destroy_listen = &qed_iwarp_destroy_listen,
+ .iwarp_accept = &qed_iwarp_accept,
+ .iwarp_reject = &qed_iwarp_reject,
+ .iwarp_send_rtr = &qed_iwarp_send_rtr,
+};
+
+const struct qed_rdma_ops *qed_get_rdma_ops(void)
+{
+ return &qed_rdma_ops_pass;
+}
+EXPORT_SYMBOL(qed_get_rdma_ops);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.h b/drivers/net/ethernet/qlogic/qed/qed_rdma.h
new file mode 100644
index 000000000000..18ec9cbd84f5
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.h
@@ -0,0 +1,206 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015-2017 QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _QED_RDMA_H
+#define _QED_RDMA_H
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/qed/qed_if.h>
+#include <linux/qed/qed_rdma_if.h>
+#include "qed.h"
+#include "qed_dev_api.h"
+#include "qed_hsi.h"
+#include "qed_iwarp.h"
+#include "qed_roce.h"
+
+#define QED_RDMA_MAX_FMR (RDMA_MAX_TIDS)
+#define QED_RDMA_MAX_P_KEY (1)
+#define QED_RDMA_MAX_WQE (0x7FFF)
+#define QED_RDMA_MAX_SRQ_WQE_ELEM (0x7FFF)
+#define QED_RDMA_PAGE_SIZE_CAPS (0xFFFFF000)
+#define QED_RDMA_ACK_DELAY (15)
+#define QED_RDMA_MAX_MR_SIZE (0x10000000000ULL)
+#define QED_RDMA_MAX_CQS (RDMA_MAX_CQS)
+#define QED_RDMA_MAX_MRS (RDMA_MAX_TIDS)
+/* Add 1 for header element */
+#define QED_RDMA_MAX_SRQ_ELEM_PER_WQE (RDMA_MAX_SGE_PER_RQ_WQE + 1)
+#define QED_RDMA_MAX_SGE_PER_SRQ_WQE (RDMA_MAX_SGE_PER_RQ_WQE)
+#define QED_RDMA_SRQ_WQE_ELEM_SIZE (16)
+#define QED_RDMA_MAX_SRQS (32 * 1024)
+
+#define QED_RDMA_MAX_CQE_32_BIT (0x7FFFFFFF - 1)
+#define QED_RDMA_MAX_CQE_16_BIT (0x7FFF - 1)
+
+enum qed_rdma_toggle_bit {
+ QED_RDMA_TOGGLE_BIT_CLEAR = 0,
+ QED_RDMA_TOGGLE_BIT_SET = 1
+};
+
+#define QED_RDMA_MAX_BMAP_NAME (10)
+struct qed_bmap {
+ unsigned long *bitmap;
+ u32 max_count;
+ char name[QED_RDMA_MAX_BMAP_NAME];
+};
+
+struct qed_rdma_info {
+ /* spin lock to protect bitmaps */
+ spinlock_t lock;
+
+ struct qed_bmap cq_map;
+ struct qed_bmap pd_map;
+ struct qed_bmap tid_map;
+ struct qed_bmap qp_map;
+ struct qed_bmap srq_map;
+ struct qed_bmap cid_map;
+ struct qed_bmap tcp_cid_map;
+ struct qed_bmap real_cid_map;
+ struct qed_bmap dpi_map;
+ struct qed_bmap toggle_bits;
+ struct qed_rdma_events events;
+ struct qed_rdma_device *dev;
+ struct qed_rdma_port *port;
+ u32 last_tid;
+ u8 num_cnqs;
+ u32 num_qps;
+ u32 num_mrs;
+ u16 queue_zone_base;
+ u16 max_queue_zones;
+ enum protocol_type proto;
+ struct qed_iwarp_info iwarp;
+};
+
+struct qed_rdma_qp {
+ struct regpair qp_handle;
+ struct regpair qp_handle_async;
+ u32 qpid;
+ u16 icid;
+ enum qed_roce_qp_state cur_state;
+ enum qed_iwarp_qp_state iwarp_state;
+ bool use_srq;
+ bool signal_all;
+ bool fmr_and_reserved_lkey;
+
+ bool incoming_rdma_read_en;
+ bool incoming_rdma_write_en;
+ bool incoming_atomic_en;
+ bool e2e_flow_control_en;
+
+ u16 pd;
+ u16 pkey;
+ u32 dest_qp;
+ u16 mtu;
+ u16 srq_id;
+ u8 traffic_class_tos;
+ u8 hop_limit_ttl;
+ u16 dpi;
+ u32 flow_label;
+ bool lb_indication;
+ u16 vlan_id;
+ u32 ack_timeout;
+ u8 retry_cnt;
+ u8 rnr_retry_cnt;
+ u8 min_rnr_nak_timer;
+ bool sqd_async;
+ union qed_gid sgid;
+ union qed_gid dgid;
+ enum roce_mode roce_mode;
+ u16 udp_src_port;
+ u8 stats_queue;
+
+	/* requester */
+ u8 max_rd_atomic_req;
+ u32 sq_psn;
+ u16 sq_cq_id;
+ u16 sq_num_pages;
+ dma_addr_t sq_pbl_ptr;
+ void *orq;
+ dma_addr_t orq_phys_addr;
+ u8 orq_num_pages;
+ bool req_offloaded;
+
+ /* responder */
+ u8 max_rd_atomic_resp;
+ u32 rq_psn;
+ u16 rq_cq_id;
+ u16 rq_num_pages;
+ dma_addr_t rq_pbl_ptr;
+ void *irq;
+ dma_addr_t irq_phys_addr;
+ u8 irq_num_pages;
+ bool resp_offloaded;
+ u32 cq_prod;
+
+ u8 remote_mac_addr[6];
+ u8 local_mac_addr[6];
+
+ void *shared_queue;
+ dma_addr_t shared_queue_phys_addr;
+ struct qed_iwarp_ep *ep;
+};
+
+#if IS_ENABLED(CONFIG_QED_RDMA)
+void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+#else
+static inline void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) {}
+static inline void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt) {}
+#endif
+
+int
+qed_rdma_bmap_alloc(struct qed_hwfn *p_hwfn,
+ struct qed_bmap *bmap, u32 max_count, char *name);
+
+void
+qed_rdma_bmap_free(struct qed_hwfn *p_hwfn, struct qed_bmap *bmap, bool check);
+
+int
+qed_rdma_bmap_alloc_id(struct qed_hwfn *p_hwfn,
+ struct qed_bmap *bmap, u32 *id_num);
+
+void
+qed_bmap_set_id(struct qed_hwfn *p_hwfn, struct qed_bmap *bmap, u32 id_num);
+
+void
+qed_bmap_release_id(struct qed_hwfn *p_hwfn, struct qed_bmap *bmap, u32 id_num);
+
+int
+qed_bmap_test_id(struct qed_hwfn *p_hwfn, struct qed_bmap *bmap, u32 id_num);
+
+void qed_rdma_set_fw_mac(u16 *p_fw_mac, u8 *p_qed_mac);
+
+bool qed_rdma_allocated_qps(struct qed_hwfn *p_hwfn);
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
index 1ae73b2d6d1e..0cdb4337b3a0 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
@@ -242,6 +242,8 @@
0x50196cUL
#define NIG_REG_LLH_CLS_TYPE_DUALMODE \
0x501964UL
+#define NIG_REG_LLH_FUNC_TAG_EN 0x5019b0UL
+#define NIG_REG_LLH_FUNC_TAG_VALUE 0x5019d0UL
#define NIG_REG_LLH_FUNC_FILTER_VALUE \
0x501a00UL
#define NIG_REG_LLH_FUNC_FILTER_VALUE_SIZE \
@@ -558,6 +560,7 @@
0x2aae60UL
#define PGLUE_B_REG_PF_BAR1_SIZE \
0x2aae64UL
+#define PGLUE_B_REG_VF_BAR1_SIZE 0x2aae68UL
#define PRS_REG_ENCAPSULATION_TYPE_EN 0x1f0730UL
#define PRS_REG_GRE_PROTOCOL 0x1f0734UL
#define PRS_REG_VXLAN_PORT 0x1f0738UL
@@ -592,15 +595,15 @@
#define QM_REG_WFQPFWEIGHT 0x2f4e80UL
#define QM_REG_WFQVPWEIGHT 0x2fa000UL
-#define PGLCS_REG_DBG_SELECT \
+#define PGLCS_REG_DBG_SELECT_K2 \
0x001d14UL
-#define PGLCS_REG_DBG_DWORD_ENABLE \
+#define PGLCS_REG_DBG_DWORD_ENABLE_K2 \
0x001d18UL
-#define PGLCS_REG_DBG_SHIFT \
+#define PGLCS_REG_DBG_SHIFT_K2 \
0x001d1cUL
-#define PGLCS_REG_DBG_FORCE_VALID \
+#define PGLCS_REG_DBG_FORCE_VALID_K2 \
0x001d20UL
-#define PGLCS_REG_DBG_FORCE_FRAME \
+#define PGLCS_REG_DBG_FORCE_FRAME_K2 \
0x001d24UL
#define MISC_REG_RESET_PL_PDA_VMAIN_1 \
0x008070UL
@@ -612,7 +615,7 @@
0x009050UL
#define MISCS_REG_RESET_PL_HV \
0x009060UL
-#define MISCS_REG_RESET_PL_HV_2 \
+#define MISCS_REG_RESET_PL_HV_2_K2 \
0x009150UL
#define DMAE_REG_DBG_SELECT \
0x00c510UL
@@ -644,15 +647,15 @@
0x0500b0UL
#define GRC_REG_DBG_FORCE_FRAME \
0x0500b4UL
-#define UMAC_REG_DBG_SELECT \
+#define UMAC_REG_DBG_SELECT_K2 \
0x051094UL
-#define UMAC_REG_DBG_DWORD_ENABLE \
+#define UMAC_REG_DBG_DWORD_ENABLE_K2 \
0x051098UL
-#define UMAC_REG_DBG_SHIFT \
+#define UMAC_REG_DBG_SHIFT_K2 \
0x05109cUL
-#define UMAC_REG_DBG_FORCE_VALID \
+#define UMAC_REG_DBG_FORCE_VALID_K2 \
0x0510a0UL
-#define UMAC_REG_DBG_FORCE_FRAME \
+#define UMAC_REG_DBG_FORCE_FRAME_K2 \
0x0510a4UL
#define MCP2_REG_DBG_SELECT \
0x052400UL
@@ -924,15 +927,15 @@
0x4c160cUL
#define XYLD_REG_DBG_FORCE_FRAME \
0x4c1610UL
-#define YULD_REG_DBG_SELECT \
+#define YULD_REG_DBG_SELECT_BB_K2 \
0x4c9600UL
-#define YULD_REG_DBG_DWORD_ENABLE \
+#define YULD_REG_DBG_DWORD_ENABLE_BB_K2 \
0x4c9604UL
-#define YULD_REG_DBG_SHIFT \
+#define YULD_REG_DBG_SHIFT_BB_K2 \
0x4c9608UL
-#define YULD_REG_DBG_FORCE_VALID \
+#define YULD_REG_DBG_FORCE_VALID_BB_K2 \
0x4c960cUL
-#define YULD_REG_DBG_FORCE_FRAME \
+#define YULD_REG_DBG_FORCE_FRAME_BB_K2 \
0x4c9610UL
#define TMLD_REG_DBG_SELECT \
0x4d1600UL
@@ -994,35 +997,35 @@
0x580710UL
#define CDU_REG_DBG_FORCE_FRAME \
0x580714UL
-#define WOL_REG_DBG_SELECT \
+#define WOL_REG_DBG_SELECT_K2 \
0x600140UL
-#define WOL_REG_DBG_DWORD_ENABLE \
+#define WOL_REG_DBG_DWORD_ENABLE_K2 \
0x600144UL
-#define WOL_REG_DBG_SHIFT \
+#define WOL_REG_DBG_SHIFT_K2 \
0x600148UL
-#define WOL_REG_DBG_FORCE_VALID \
+#define WOL_REG_DBG_FORCE_VALID_K2 \
0x60014cUL
-#define WOL_REG_DBG_FORCE_FRAME \
+#define WOL_REG_DBG_FORCE_FRAME_K2 \
0x600150UL
-#define BMBN_REG_DBG_SELECT \
+#define BMBN_REG_DBG_SELECT_K2 \
0x610140UL
-#define BMBN_REG_DBG_DWORD_ENABLE \
+#define BMBN_REG_DBG_DWORD_ENABLE_K2 \
0x610144UL
-#define BMBN_REG_DBG_SHIFT \
+#define BMBN_REG_DBG_SHIFT_K2 \
0x610148UL
-#define BMBN_REG_DBG_FORCE_VALID \
+#define BMBN_REG_DBG_FORCE_VALID_K2 \
0x61014cUL
-#define BMBN_REG_DBG_FORCE_FRAME \
+#define BMBN_REG_DBG_FORCE_FRAME_K2 \
0x610150UL
-#define NWM_REG_DBG_SELECT \
+#define NWM_REG_DBG_SELECT_K2 \
0x8000ecUL
-#define NWM_REG_DBG_DWORD_ENABLE \
+#define NWM_REG_DBG_DWORD_ENABLE_K2 \
0x8000f0UL
-#define NWM_REG_DBG_SHIFT \
+#define NWM_REG_DBG_SHIFT_K2 \
0x8000f4UL
-#define NWM_REG_DBG_FORCE_VALID \
+#define NWM_REG_DBG_FORCE_VALID_K2 \
0x8000f8UL
-#define NWM_REG_DBG_FORCE_FRAME \
+#define NWM_REG_DBG_FORCE_FRAME_K2 \
0x8000fcUL
#define PBF_REG_DBG_SELECT \
0xd80060UL
@@ -1244,35 +1247,35 @@
0x1901534UL
#define USEM_REG_DBG_FORCE_FRAME \
0x1901538UL
-#define NWS_REG_DBG_SELECT \
+#define NWS_REG_DBG_SELECT_K2 \
0x700128UL
-#define NWS_REG_DBG_DWORD_ENABLE \
+#define NWS_REG_DBG_DWORD_ENABLE_K2 \
0x70012cUL
-#define NWS_REG_DBG_SHIFT \
+#define NWS_REG_DBG_SHIFT_K2 \
0x700130UL
-#define NWS_REG_DBG_FORCE_VALID \
+#define NWS_REG_DBG_FORCE_VALID_K2 \
0x700134UL
-#define NWS_REG_DBG_FORCE_FRAME \
+#define NWS_REG_DBG_FORCE_FRAME_K2 \
0x700138UL
-#define MS_REG_DBG_SELECT \
+#define MS_REG_DBG_SELECT_K2 \
0x6a0228UL
-#define MS_REG_DBG_DWORD_ENABLE \
+#define MS_REG_DBG_DWORD_ENABLE_K2 \
0x6a022cUL
-#define MS_REG_DBG_SHIFT \
+#define MS_REG_DBG_SHIFT_K2 \
0x6a0230UL
-#define MS_REG_DBG_FORCE_VALID \
+#define MS_REG_DBG_FORCE_VALID_K2 \
0x6a0234UL
-#define MS_REG_DBG_FORCE_FRAME \
+#define MS_REG_DBG_FORCE_FRAME_K2 \
0x6a0238UL
-#define PCIE_REG_DBG_COMMON_SELECT \
+#define PCIE_REG_DBG_COMMON_SELECT_K2 \
0x054398UL
-#define PCIE_REG_DBG_COMMON_DWORD_ENABLE \
+#define PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2 \
0x05439cUL
-#define PCIE_REG_DBG_COMMON_SHIFT \
+#define PCIE_REG_DBG_COMMON_SHIFT_K2 \
0x0543a0UL
-#define PCIE_REG_DBG_COMMON_FORCE_VALID \
+#define PCIE_REG_DBG_COMMON_FORCE_VALID_K2 \
0x0543a4UL
-#define PCIE_REG_DBG_COMMON_FORCE_FRAME \
+#define PCIE_REG_DBG_COMMON_FORCE_FRAME_K2 \
0x0543a8UL
#define MISC_REG_RESET_PL_UA \
0x008050UL
@@ -1328,85 +1331,85 @@
0x128170cUL
#define UCM_REG_SM_TASK_CTX \
0x1281710UL
-#define XSEM_REG_SLOW_DBG_EMPTY \
+#define XSEM_REG_SLOW_DBG_EMPTY_BB_K2 \
0x1401140UL
#define XSEM_REG_SYNC_DBG_EMPTY \
0x1401160UL
-#define XSEM_REG_SLOW_DBG_ACTIVE \
+#define XSEM_REG_SLOW_DBG_ACTIVE_BB_K2 \
0x1401400UL
-#define XSEM_REG_SLOW_DBG_MODE \
+#define XSEM_REG_SLOW_DBG_MODE_BB_K2 \
0x1401404UL
-#define XSEM_REG_DBG_FRAME_MODE \
+#define XSEM_REG_DBG_FRAME_MODE_BB_K2 \
0x1401408UL
-#define XSEM_REG_DBG_MODE1_CFG \
+#define XSEM_REG_DBG_MODE1_CFG_BB_K2 \
0x1401420UL
#define XSEM_REG_FAST_MEMORY \
0x1440000UL
#define YSEM_REG_SYNC_DBG_EMPTY \
0x1501160UL
-#define YSEM_REG_SLOW_DBG_ACTIVE \
+#define YSEM_REG_SLOW_DBG_ACTIVE_BB_K2 \
0x1501400UL
-#define YSEM_REG_SLOW_DBG_MODE \
+#define YSEM_REG_SLOW_DBG_MODE_BB_K2 \
0x1501404UL
-#define YSEM_REG_DBG_FRAME_MODE \
+#define YSEM_REG_DBG_FRAME_MODE_BB_K2 \
0x1501408UL
-#define YSEM_REG_DBG_MODE1_CFG \
+#define YSEM_REG_DBG_MODE1_CFG_BB_K2 \
0x1501420UL
#define YSEM_REG_FAST_MEMORY \
0x1540000UL
-#define PSEM_REG_SLOW_DBG_EMPTY \
+#define PSEM_REG_SLOW_DBG_EMPTY_BB_K2 \
0x1601140UL
#define PSEM_REG_SYNC_DBG_EMPTY \
0x1601160UL
-#define PSEM_REG_SLOW_DBG_ACTIVE \
+#define PSEM_REG_SLOW_DBG_ACTIVE_BB_K2 \
0x1601400UL
-#define PSEM_REG_SLOW_DBG_MODE \
+#define PSEM_REG_SLOW_DBG_MODE_BB_K2 \
0x1601404UL
-#define PSEM_REG_DBG_FRAME_MODE \
+#define PSEM_REG_DBG_FRAME_MODE_BB_K2 \
0x1601408UL
-#define PSEM_REG_DBG_MODE1_CFG \
+#define PSEM_REG_DBG_MODE1_CFG_BB_K2 \
0x1601420UL
#define PSEM_REG_FAST_MEMORY \
0x1640000UL
-#define TSEM_REG_SLOW_DBG_EMPTY \
+#define TSEM_REG_SLOW_DBG_EMPTY_BB_K2 \
0x1701140UL
#define TSEM_REG_SYNC_DBG_EMPTY \
0x1701160UL
-#define TSEM_REG_SLOW_DBG_ACTIVE \
+#define TSEM_REG_SLOW_DBG_ACTIVE_BB_K2 \
0x1701400UL
-#define TSEM_REG_SLOW_DBG_MODE \
+#define TSEM_REG_SLOW_DBG_MODE_BB_K2 \
0x1701404UL
-#define TSEM_REG_DBG_FRAME_MODE \
+#define TSEM_REG_DBG_FRAME_MODE_BB_K2 \
0x1701408UL
-#define TSEM_REG_DBG_MODE1_CFG \
+#define TSEM_REG_DBG_MODE1_CFG_BB_K2 \
0x1701420UL
#define TSEM_REG_FAST_MEMORY \
0x1740000UL
-#define MSEM_REG_SLOW_DBG_EMPTY \
+#define MSEM_REG_SLOW_DBG_EMPTY_BB_K2 \
0x1801140UL
#define MSEM_REG_SYNC_DBG_EMPTY \
0x1801160UL
-#define MSEM_REG_SLOW_DBG_ACTIVE \
+#define MSEM_REG_SLOW_DBG_ACTIVE_BB_K2 \
0x1801400UL
-#define MSEM_REG_SLOW_DBG_MODE \
+#define MSEM_REG_SLOW_DBG_MODE_BB_K2 \
0x1801404UL
-#define MSEM_REG_DBG_FRAME_MODE \
+#define MSEM_REG_DBG_FRAME_MODE_BB_K2 \
0x1801408UL
-#define MSEM_REG_DBG_MODE1_CFG \
+#define MSEM_REG_DBG_MODE1_CFG_BB_K2 \
0x1801420UL
#define MSEM_REG_FAST_MEMORY \
0x1840000UL
-#define USEM_REG_SLOW_DBG_EMPTY \
+#define USEM_REG_SLOW_DBG_EMPTY_BB_K2 \
0x1901140UL
#define USEM_REG_SYNC_DBG_EMPTY \
0x1901160UL
-#define USEM_REG_SLOW_DBG_ACTIVE \
+#define USEM_REG_SLOW_DBG_ACTIVE_BB_K2 \
0x1901400UL
-#define USEM_REG_SLOW_DBG_MODE \
+#define USEM_REG_SLOW_DBG_MODE_BB_K2 \
0x1901404UL
-#define USEM_REG_DBG_FRAME_MODE \
+#define USEM_REG_DBG_FRAME_MODE_BB_K2 \
0x1901408UL
-#define USEM_REG_DBG_MODE1_CFG \
+#define USEM_REG_DBG_MODE1_CFG_BB_K2 \
0x1901420UL
#define USEM_REG_FAST_MEMORY \
0x1940000UL
@@ -1430,7 +1433,7 @@
0x340800UL
#define BRB_REG_BIG_RAM_DATA \
0x341500UL
-#define SEM_FAST_REG_STALL_0 \
+#define SEM_FAST_REG_STALL_0_BB_K2 \
0x000488UL
#define SEM_FAST_REG_STALLED \
0x000494UL
@@ -1480,37 +1483,37 @@
4
#define MISC_REG_BLOCK_256B_EN \
0x008c14UL
-#define NWS_REG_NWS_CMU \
+#define NWS_REG_NWS_CMU_K2 \
0x720000UL
-#define PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0 \
+#define PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2 \
0x000680UL
-#define PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8 \
+#define PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2 \
0x000684UL
-#define PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0 \
+#define PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2 \
0x0006c0UL
-#define PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8 \
+#define PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2 \
0x0006c4UL
-#define MS_REG_MS_CMU \
+#define MS_REG_MS_CMU_K2 \
0x6a4000UL
-#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130 \
+#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2 \
0x000208UL
-#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132 \
- 0x000210UL
-#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131 \
+#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2 \
0x00020cUL
-#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133 \
+#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2 \
+ 0x000210UL
+#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2 \
0x000214UL
-#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130 \
+#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2 \
0x000208UL
-#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131 \
+#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2 \
0x00020cUL
-#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132 \
+#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2 \
0x000210UL
-#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133 \
+#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2 \
0x000214UL
-#define PHY_PCIE_REG_PHY0 \
+#define PHY_PCIE_REG_PHY0_K2 \
0x620000UL
-#define PHY_PCIE_REG_PHY1 \
+#define PHY_PCIE_REG_PHY1_K2 \
0x624000UL
#define NIG_REG_ROCE_DUPLICATE_TO_HOST 0x5088f0UL
#define PRS_REG_LIGHT_L2_ETHERTYPE_EN 0x1f0968UL
@@ -1557,9 +1560,16 @@
#define PGLUE_B_REG_PGL_ADDR_EC_F0_K2 0x2aaf9cUL
#define PGLUE_B_REG_PGL_ADDR_F0_F0_K2 0x2aafa0UL
#define PGLUE_B_REG_PGL_ADDR_F4_F0_K2 0x2aafa4UL
+#define PGLUE_B_REG_MASTER_WRITE_PAD_ENABLE 0x2aae30UL
#define NIG_REG_TSGEN_FREECNT_UPDATE_K2 0x509008UL
#define CNIG_REG_NIG_PORT0_CONF_K2 0x218200UL
+#define NIG_REG_TX_EDPM_CTRL 0x501f0cUL
+#define NIG_REG_TX_EDPM_CTRL_TX_EDPM_EN (0x1 << 0)
+#define NIG_REG_TX_EDPM_CTRL_TX_EDPM_EN_SHIFT 0
+#define NIG_REG_TX_EDPM_CTRL_TX_EDPM_TC_EN (0xff << 1)
+#define NIG_REG_TX_EDPM_CTRL_TX_EDPM_TC_EN_SHIFT 1
+
#define PRS_REG_SEARCH_GFT 0x1f11bcUL
#define PRS_REG_CM_HDR_GFT 0x1f11c8UL
#define PRS_REG_GFT_CAM 0x1f1100UL
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c
index 56289d7cd306..fb7c2d1562ae 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_roce.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c
@@ -35,12 +35,7 @@
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
-#include <linux/etherdevice.h>
-#include <linux/if_ether.h>
-#include <linux/if_vlan.h>
#include <linux/io.h>
-#include <linux/ip.h>
-#include <linux/ipv6.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
@@ -49,10 +44,6 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
-#include <linux/tcp.h>
-#include <linux/bitops.h>
-#include <linux/qed/qed_roce_if.h>
-#include <linux/qed/qed_roce_if.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_hsi.h"
@@ -62,18 +53,21 @@
#include "qed_ll2.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
-#include "qed_sp.h"
+#include <linux/qed/qed_rdma_if.h>
+#include "qed_rdma.h"
#include "qed_roce.h"
-#include "qed_ll2.h"
+#include "qed_sp.h"
static void qed_roce_free_real_icid(struct qed_hwfn *p_hwfn, u16 icid);
-void qed_roce_async_event(struct qed_hwfn *p_hwfn,
- u8 fw_event_code, union rdma_eqe_data *rdma_data)
+static int
+qed_roce_async_event(struct qed_hwfn *p_hwfn,
+ u8 fw_event_code,
+ u16 echo, union event_ring_data *data, u8 fw_return_code)
{
if (fw_event_code == ROCE_ASYNC_EVENT_DESTROY_QP_DONE) {
u16 icid =
- (u16)le32_to_cpu(rdma_data->rdma_destroy_qp_data.cid);
+ (u16)le32_to_cpu(data->rdma_data.rdma_destroy_qp_data.cid);
/* icid release in this async event can occur only if the icid
* was offloaded to the FW. In case it wasn't offloaded this is
@@ -85,290 +79,15 @@ void qed_roce_async_event(struct qed_hwfn *p_hwfn,
events->affiliated_event(p_hwfn->p_rdma_info->events.context,
fw_event_code,
- &rdma_data->async_handle);
- }
-}
-
-static int qed_rdma_bmap_alloc(struct qed_hwfn *p_hwfn,
- struct qed_bmap *bmap, u32 max_count, char *name)
-{
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "max_count = %08x\n", max_count);
-
- bmap->max_count = max_count;
-
- bmap->bitmap = kzalloc(BITS_TO_LONGS(max_count) * sizeof(long),
- GFP_KERNEL);
- if (!bmap->bitmap) {
- DP_NOTICE(p_hwfn,
- "qed bmap alloc failed: cannot allocate memory (bitmap)\n");
- return -ENOMEM;
+ (void *)&data->rdma_data.async_handle);
}
- snprintf(bmap->name, QED_RDMA_MAX_BMAP_NAME, "%s", name);
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "0\n");
- return 0;
-}
-
-static int qed_rdma_bmap_alloc_id(struct qed_hwfn *p_hwfn,
- struct qed_bmap *bmap, u32 *id_num)
-{
- *id_num = find_first_zero_bit(bmap->bitmap, bmap->max_count);
- if (*id_num >= bmap->max_count)
- return -EINVAL;
-
- __set_bit(*id_num, bmap->bitmap);
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "%s bitmap: allocated id %d\n",
- bmap->name, *id_num);
-
return 0;
}
-static void qed_bmap_set_id(struct qed_hwfn *p_hwfn,
- struct qed_bmap *bmap, u32 id_num)
-{
- if (id_num >= bmap->max_count)
- return;
-
- __set_bit(id_num, bmap->bitmap);
-}
-
-static void qed_bmap_release_id(struct qed_hwfn *p_hwfn,
- struct qed_bmap *bmap, u32 id_num)
-{
- bool b_acquired;
-
- if (id_num >= bmap->max_count)
- return;
-
- b_acquired = test_and_clear_bit(id_num, bmap->bitmap);
- if (!b_acquired) {
- DP_NOTICE(p_hwfn, "%s bitmap: id %d already released\n",
- bmap->name, id_num);
- return;
- }
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "%s bitmap: released id %d\n",
- bmap->name, id_num);
-}
-
-static int qed_bmap_test_id(struct qed_hwfn *p_hwfn,
- struct qed_bmap *bmap, u32 id_num)
-{
- if (id_num >= bmap->max_count)
- return -1;
-
- return test_bit(id_num, bmap->bitmap);
-}
-
-static u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id)
-{
- /* First sb id for RoCE is after all the l2 sb */
- return FEAT_NUM((struct qed_hwfn *)p_hwfn, QED_PF_L2_QUE) + rel_sb_id;
-}
-
-static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- struct qed_rdma_start_in_params *params)
-{
- struct qed_rdma_info *p_rdma_info;
- u32 num_cons, num_tasks;
- int rc = -ENOMEM;
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocating RDMA\n");
-
- /* Allocate a struct with current pf rdma info */
- p_rdma_info = kzalloc(sizeof(*p_rdma_info), GFP_KERNEL);
- if (!p_rdma_info) {
- DP_NOTICE(p_hwfn,
- "qed rdma alloc failed: cannot allocate memory (rdma info). rc = %d\n",
- rc);
- return rc;
- }
-
- p_hwfn->p_rdma_info = p_rdma_info;
- p_rdma_info->proto = PROTOCOLID_ROCE;
-
- num_cons = qed_cxt_get_proto_cid_count(p_hwfn, p_rdma_info->proto,
- NULL);
-
- p_rdma_info->num_qps = num_cons / 2;
-
- num_tasks = qed_cxt_get_proto_tid_count(p_hwfn, PROTOCOLID_ROCE);
-
- /* Each MR uses a single task */
- p_rdma_info->num_mrs = num_tasks;
-
- /* Queue zone lines are shared between RoCE and L2 in such a way that
- * they can be used by each without obstructing the other.
- */
- p_rdma_info->queue_zone_base = (u16)RESC_START(p_hwfn, QED_L2_QUEUE);
- p_rdma_info->max_queue_zones = (u16)RESC_NUM(p_hwfn, QED_L2_QUEUE);
-
- /* Allocate a struct with device params and fill it */
- p_rdma_info->dev = kzalloc(sizeof(*p_rdma_info->dev), GFP_KERNEL);
- if (!p_rdma_info->dev) {
- DP_NOTICE(p_hwfn,
- "qed rdma alloc failed: cannot allocate memory (rdma info dev). rc = %d\n",
- rc);
- goto free_rdma_info;
- }
-
- /* Allocate a struct with port params and fill it */
- p_rdma_info->port = kzalloc(sizeof(*p_rdma_info->port), GFP_KERNEL);
- if (!p_rdma_info->port) {
- DP_NOTICE(p_hwfn,
- "qed rdma alloc failed: cannot allocate memory (rdma info port). rc = %d\n",
- rc);
- goto free_rdma_dev;
- }
-
- /* Allocate bit map for pd's */
- rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->pd_map, RDMA_MAX_PDS,
- "PD");
- if (rc) {
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
- "Failed to allocate pd_map, rc = %d\n",
- rc);
- goto free_rdma_port;
- }
-
- /* Allocate DPI bitmap */
- rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->dpi_map,
- p_hwfn->dpi_count, "DPI");
- if (rc) {
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
- "Failed to allocate DPI bitmap, rc = %d\n", rc);
- goto free_pd_map;
- }
-
- /* Allocate bitmap for cq's. The maximum number of CQs is bounded to
- * twice the number of QPs.
- */
- rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cq_map,
- p_rdma_info->num_qps * 2, "CQ");
- if (rc) {
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
- "Failed to allocate cq bitmap, rc = %d\n", rc);
- goto free_dpi_map;
- }
-
- /* Allocate bitmap for toggle bit for cq icids
- * We toggle the bit every time we create or resize cq for a given icid.
- * The maximum number of CQs is bounded to twice the number of QPs.
- */
- rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->toggle_bits,
- p_rdma_info->num_qps * 2, "Toggle");
- if (rc) {
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
- "Failed to allocate toogle bits, rc = %d\n", rc);
- goto free_cq_map;
- }
-
- /* Allocate bitmap for itids */
- rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->tid_map,
- p_rdma_info->num_mrs, "MR");
- if (rc) {
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
- "Failed to allocate itids bitmaps, rc = %d\n", rc);
- goto free_toggle_map;
- }
-
- /* Allocate bitmap for cids used for qps. */
- rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cid_map, num_cons,
- "CID");
- if (rc) {
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
- "Failed to allocate cid bitmap, rc = %d\n", rc);
- goto free_tid_map;
- }
-
- /* Allocate bitmap for cids used for responders/requesters. */
- rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->real_cid_map, num_cons,
- "REAL_CID");
- if (rc) {
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
- "Failed to allocate real cid bitmap, rc = %d\n", rc);
- goto free_cid_map;
- }
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocation successful\n");
- return 0;
-
-free_cid_map:
- kfree(p_rdma_info->cid_map.bitmap);
-free_tid_map:
- kfree(p_rdma_info->tid_map.bitmap);
-free_toggle_map:
- kfree(p_rdma_info->toggle_bits.bitmap);
-free_cq_map:
- kfree(p_rdma_info->cq_map.bitmap);
-free_dpi_map:
- kfree(p_rdma_info->dpi_map.bitmap);
-free_pd_map:
- kfree(p_rdma_info->pd_map.bitmap);
-free_rdma_port:
- kfree(p_rdma_info->port);
-free_rdma_dev:
- kfree(p_rdma_info->dev);
-free_rdma_info:
- kfree(p_rdma_info);
-
- return rc;
-}
-
-static void qed_rdma_bmap_free(struct qed_hwfn *p_hwfn,
- struct qed_bmap *bmap, bool check)
-{
- int weight = bitmap_weight(bmap->bitmap, bmap->max_count);
- int last_line = bmap->max_count / (64 * 8);
- int last_item = last_line * 8 +
- DIV_ROUND_UP(bmap->max_count % (64 * 8), 64);
- u64 *pmap = (u64 *)bmap->bitmap;
- int line, item, offset;
- u8 str_last_line[200] = { 0 };
-
- if (!weight || !check)
- goto end;
-
- DP_NOTICE(p_hwfn,
- "%s bitmap not free - size=%d, weight=%d, 512 bits per line\n",
- bmap->name, bmap->max_count, weight);
-
- /* print aligned non-zero lines, if any */
- for (item = 0, line = 0; line < last_line; line++, item += 8)
- if (bitmap_weight((unsigned long *)&pmap[item], 64 * 8))
- DP_NOTICE(p_hwfn,
- "line 0x%04x: 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n",
- line,
- pmap[item],
- pmap[item + 1],
- pmap[item + 2],
- pmap[item + 3],
- pmap[item + 4],
- pmap[item + 5],
- pmap[item + 6], pmap[item + 7]);
-
- /* print last unaligned non-zero line, if any */
- if ((bmap->max_count % (64 * 8)) &&
- (bitmap_weight((unsigned long *)&pmap[item],
- bmap->max_count - item * 64))) {
- offset = sprintf(str_last_line, "line 0x%04x: ", line);
- for (; item < last_item; item++)
- offset += sprintf(str_last_line + offset,
- "0x%016llx ", pmap[item]);
- DP_NOTICE(p_hwfn, "%s\n", str_last_line);
- }
-
-end:
- kfree(bmap->bitmap);
- bmap->bitmap = NULL;
-}
-
-static void qed_rdma_resc_free(struct qed_hwfn *p_hwfn)
+void qed_roce_stop(struct qed_hwfn *p_hwfn)
{
struct qed_bmap *rcid_map = &p_hwfn->p_rdma_info->real_cid_map;
- struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
int wait_count = 0;
/* when destroying a_RoCE QP the control is returned to the user after
@@ -383,780 +102,7 @@ static void qed_rdma_resc_free(struct qed_hwfn *p_hwfn)
break;
}
}
-
- qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->cid_map, 1);
- qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->pd_map, 1);
- qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->dpi_map, 1);
- qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->cq_map, 1);
- qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->toggle_bits, 0);
- qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->tid_map, 1);
-
- kfree(p_rdma_info->port);
- kfree(p_rdma_info->dev);
-
- kfree(p_rdma_info);
-}
-
-static void qed_rdma_free(struct qed_hwfn *p_hwfn)
-{
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Freeing RDMA\n");
-
- qed_rdma_resc_free(p_hwfn);
-}
-
-static void qed_rdma_get_guid(struct qed_hwfn *p_hwfn, u8 *guid)
-{
- guid[0] = p_hwfn->hw_info.hw_mac_addr[0] ^ 2;
- guid[1] = p_hwfn->hw_info.hw_mac_addr[1];
- guid[2] = p_hwfn->hw_info.hw_mac_addr[2];
- guid[3] = 0xff;
- guid[4] = 0xfe;
- guid[5] = p_hwfn->hw_info.hw_mac_addr[3];
- guid[6] = p_hwfn->hw_info.hw_mac_addr[4];
- guid[7] = p_hwfn->hw_info.hw_mac_addr[5];
-}
-
-static void qed_rdma_init_events(struct qed_hwfn *p_hwfn,
- struct qed_rdma_start_in_params *params)
-{
- struct qed_rdma_events *events;
-
- events = &p_hwfn->p_rdma_info->events;
-
- events->unaffiliated_event = params->events->unaffiliated_event;
- events->affiliated_event = params->events->affiliated_event;
- events->context = params->events->context;
-}
-
-static void qed_rdma_init_devinfo(struct qed_hwfn *p_hwfn,
- struct qed_rdma_start_in_params *params)
-{
- struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;
- struct qed_dev *cdev = p_hwfn->cdev;
- u32 pci_status_control;
- u32 num_qps;
-
- /* Vendor specific information */
- dev->vendor_id = cdev->vendor_id;
- dev->vendor_part_id = cdev->device_id;
- dev->hw_ver = 0;
- dev->fw_ver = (FW_MAJOR_VERSION << 24) | (FW_MINOR_VERSION << 16) |
- (FW_REVISION_VERSION << 8) | (FW_ENGINEERING_VERSION);
-
- qed_rdma_get_guid(p_hwfn, (u8 *)&dev->sys_image_guid);
- dev->node_guid = dev->sys_image_guid;
-
- dev->max_sge = min_t(u32, RDMA_MAX_SGE_PER_SQ_WQE,
- RDMA_MAX_SGE_PER_RQ_WQE);
-
- if (cdev->rdma_max_sge)
- dev->max_sge = min_t(u32, cdev->rdma_max_sge, dev->max_sge);
-
- dev->max_inline = ROCE_REQ_MAX_INLINE_DATA_SIZE;
-
- dev->max_inline = (cdev->rdma_max_inline) ?
- min_t(u32, cdev->rdma_max_inline, dev->max_inline) :
- dev->max_inline;
-
- dev->max_wqe = QED_RDMA_MAX_WQE;
- dev->max_cnq = (u8)FEAT_NUM(p_hwfn, QED_RDMA_CNQ);
-
- /* The number of QPs may be higher than QED_ROCE_MAX_QPS, because
- * it is up-aligned to 16 and then to ILT page size within qed cxt.
- * This is OK in terms of ILT but we don't want to configure the FW
- * above its abilities
- */
- num_qps = ROCE_MAX_QPS;
- num_qps = min_t(u64, num_qps, p_hwfn->p_rdma_info->num_qps);
- dev->max_qp = num_qps;
-
- /* CQs uses the same icids that QPs use hence they are limited by the
- * number of icids. There are two icids per QP.
- */
- dev->max_cq = num_qps * 2;
-
- /* The number of mrs is smaller by 1 since the first is reserved */
- dev->max_mr = p_hwfn->p_rdma_info->num_mrs - 1;
- dev->max_mr_size = QED_RDMA_MAX_MR_SIZE;
-
- /* The maximum CQE capacity per CQ supported.
- * max number of cqes will be in two layer pbl,
- * 8 is the pointer size in bytes
- * 32 is the size of cq element in bytes
- */
- if (params->cq_mode == QED_RDMA_CQ_MODE_32_BITS)
- dev->max_cqe = QED_RDMA_MAX_CQE_32_BIT;
- else
- dev->max_cqe = QED_RDMA_MAX_CQE_16_BIT;
-
- dev->max_mw = 0;
- dev->max_fmr = QED_RDMA_MAX_FMR;
- dev->max_mr_mw_fmr_pbl = (PAGE_SIZE / 8) * (PAGE_SIZE / 8);
- dev->max_mr_mw_fmr_size = dev->max_mr_mw_fmr_pbl * PAGE_SIZE;
- dev->max_pkey = QED_RDMA_MAX_P_KEY;
-
- dev->max_qp_resp_rd_atomic_resc = RDMA_RING_PAGE_SIZE /
- (RDMA_RESP_RD_ATOMIC_ELM_SIZE * 2);
- dev->max_qp_req_rd_atomic_resc = RDMA_RING_PAGE_SIZE /
- RDMA_REQ_RD_ATOMIC_ELM_SIZE;
- dev->max_dev_resp_rd_atomic_resc = dev->max_qp_resp_rd_atomic_resc *
- p_hwfn->p_rdma_info->num_qps;
- dev->page_size_caps = QED_RDMA_PAGE_SIZE_CAPS;
- dev->dev_ack_delay = QED_RDMA_ACK_DELAY;
- dev->max_pd = RDMA_MAX_PDS;
- dev->max_ah = p_hwfn->p_rdma_info->num_qps;
- dev->max_stats_queues = (u8)RESC_NUM(p_hwfn, QED_RDMA_STATS_QUEUE);
-
- /* Set capablities */
- dev->dev_caps = 0;
- SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_RNR_NAK, 1);
- SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_PORT_ACTIVE_EVENT, 1);
- SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_PORT_CHANGE_EVENT, 1);
- SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_RESIZE_CQ, 1);
- SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_BASE_MEMORY_EXT, 1);
- SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_BASE_QUEUE_EXT, 1);
- SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_ZBVA, 1);
- SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_LOCAL_INV_FENCE, 1);
-
- /* Check atomic operations support in PCI configuration space. */
- pci_read_config_dword(cdev->pdev,
- cdev->pdev->pcie_cap + PCI_EXP_DEVCTL2,
- &pci_status_control);
-
- if (pci_status_control & PCI_EXP_DEVCTL2_LTR_EN)
- SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_ATOMIC_OP, 1);
-}
-
-static void qed_rdma_init_port(struct qed_hwfn *p_hwfn)
-{
- struct qed_rdma_port *port = p_hwfn->p_rdma_info->port;
- struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;
-
- port->port_state = p_hwfn->mcp_info->link_output.link_up ?
- QED_RDMA_PORT_UP : QED_RDMA_PORT_DOWN;
-
- port->max_msg_size = min_t(u64,
- (dev->max_mr_mw_fmr_size *
- p_hwfn->cdev->rdma_max_sge),
- BIT(31));
-
- port->pkey_bad_counter = 0;
-}
-
-static int qed_rdma_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
-{
- u32 ll2_ethertype_en;
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Initializing HW\n");
- p_hwfn->b_rdma_enabled_in_prs = false;
-
- qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, 0);
-
- p_hwfn->rdma_prs_search_reg = PRS_REG_SEARCH_ROCE;
-
- /* We delay writing to this reg until first cid is allocated. See
- * qed_cxt_dynamic_ilt_alloc function for more details
- */
- ll2_ethertype_en = qed_rd(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN);
- qed_wr(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN,
- (ll2_ethertype_en | 0x01));
-
- if (qed_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_ROCE) % 2) {
- DP_NOTICE(p_hwfn, "The first RoCE's cid should be even\n");
- return -EINVAL;
- }
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Initializing HW - Done\n");
- return 0;
-}
-
-static int qed_rdma_start_fw(struct qed_hwfn *p_hwfn,
- struct qed_rdma_start_in_params *params,
- struct qed_ptt *p_ptt)
-{
- struct rdma_init_func_ramrod_data *p_ramrod;
- struct qed_rdma_cnq_params *p_cnq_pbl_list;
- struct rdma_init_func_hdr *p_params_header;
- struct rdma_cnq_params *p_cnq_params;
- struct qed_sp_init_data init_data;
- struct qed_spq_entry *p_ent;
- u32 cnq_id, sb_id;
- int rc;
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Starting FW\n");
-
- /* Save the number of cnqs for the function close ramrod */
- p_hwfn->p_rdma_info->num_cnqs = params->desired_cnq;
-
- /* Get SPQ entry */
- memset(&init_data, 0, sizeof(init_data));
- init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
- init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
-
- rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_FUNC_INIT,
- p_hwfn->p_rdma_info->proto, &init_data);
- if (rc)
- return rc;
-
- p_ramrod = &p_ent->ramrod.roce_init_func.rdma;
-
- p_params_header = &p_ramrod->params_header;
- p_params_header->cnq_start_offset = (u8)RESC_START(p_hwfn,
- QED_RDMA_CNQ_RAM);
- p_params_header->num_cnqs = params->desired_cnq;
-
- if (params->cq_mode == QED_RDMA_CQ_MODE_16_BITS)
- p_params_header->cq_ring_mode = 1;
- else
- p_params_header->cq_ring_mode = 0;
-
- for (cnq_id = 0; cnq_id < params->desired_cnq; cnq_id++) {
- sb_id = qed_rdma_get_sb_id(p_hwfn, cnq_id);
- p_cnq_params = &p_ramrod->cnq_params[cnq_id];
- p_cnq_pbl_list = &params->cnq_pbl_list[cnq_id];
- p_cnq_params->sb_num =
- cpu_to_le16(p_hwfn->sbs_info[sb_id]->igu_sb_id);
-
- p_cnq_params->sb_index = p_hwfn->pf_params.rdma_pf_params.gl_pi;
- p_cnq_params->num_pbl_pages = p_cnq_pbl_list->num_pbl_pages;
-
- DMA_REGPAIR_LE(p_cnq_params->pbl_base_addr,
- p_cnq_pbl_list->pbl_ptr);
-
- /* we assume here that cnq_id and qz_offset are the same */
- p_cnq_params->queue_zone_num =
- cpu_to_le16(p_hwfn->p_rdma_info->queue_zone_base +
- cnq_id);
- }
-
- return qed_spq_post(p_hwfn, p_ent, NULL);
-}
-
-static int qed_rdma_alloc_tid(void *rdma_cxt, u32 *itid)
-{
- struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
- int rc;
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID\n");
-
- spin_lock_bh(&p_hwfn->p_rdma_info->lock);
- rc = qed_rdma_bmap_alloc_id(p_hwfn,
- &p_hwfn->p_rdma_info->tid_map, itid);
- spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
- if (rc)
- goto out;
-
- rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_TASK, *itid);
-out:
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID - done, rc = %d\n", rc);
- return rc;
-}
-
-static int qed_rdma_reserve_lkey(struct qed_hwfn *p_hwfn)
-{
- struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;
-
- /* The first DPI is reserved for the Kernel */
- __set_bit(0, p_hwfn->p_rdma_info->dpi_map.bitmap);
-
- /* Tid 0 will be used as the key for "reserved MR".
- * The driver should allocate memory for it so it can be loaded but no
- * ramrod should be passed on it.
- */
- qed_rdma_alloc_tid(p_hwfn, &dev->reserved_lkey);
- if (dev->reserved_lkey != RDMA_RESERVED_LKEY) {
- DP_NOTICE(p_hwfn,
- "Reserved lkey should be equal to RDMA_RESERVED_LKEY\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int qed_rdma_setup(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- struct qed_rdma_start_in_params *params)
-{
- int rc;
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA setup\n");
-
- spin_lock_init(&p_hwfn->p_rdma_info->lock);
-
- qed_rdma_init_devinfo(p_hwfn, params);
- qed_rdma_init_port(p_hwfn);
- qed_rdma_init_events(p_hwfn, params);
-
- rc = qed_rdma_reserve_lkey(p_hwfn);
- if (rc)
- return rc;
-
- rc = qed_rdma_init_hw(p_hwfn, p_ptt);
- if (rc)
- return rc;
-
- return qed_rdma_start_fw(p_hwfn, params, p_ptt);
-}
-
-static int qed_rdma_stop(void *rdma_cxt)
-{
- struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
- struct rdma_close_func_ramrod_data *p_ramrod;
- struct qed_sp_init_data init_data;
- struct qed_spq_entry *p_ent;
- struct qed_ptt *p_ptt;
- u32 ll2_ethertype_en;
- int rc = -EBUSY;
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA stop\n");
-
- p_ptt = qed_ptt_acquire(p_hwfn);
- if (!p_ptt) {
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Failed to acquire PTT\n");
- return rc;
- }
-
- /* Disable RoCE search */
- qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 0);
- p_hwfn->b_rdma_enabled_in_prs = false;
-
- qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, 0);
-
- ll2_ethertype_en = qed_rd(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN);
-
- qed_wr(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN,
- (ll2_ethertype_en & 0xFFFE));
-
- qed_ptt_release(p_hwfn, p_ptt);
-
- /* Get SPQ entry */
- memset(&init_data, 0, sizeof(init_data));
- init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
- init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
-
- /* Stop RoCE */
- rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_FUNC_CLOSE,
- p_hwfn->p_rdma_info->proto, &init_data);
- if (rc)
- goto out;
-
- p_ramrod = &p_ent->ramrod.rdma_close_func;
-
- p_ramrod->num_cnqs = p_hwfn->p_rdma_info->num_cnqs;
- p_ramrod->cnq_start_offset = (u8)RESC_START(p_hwfn, QED_RDMA_CNQ_RAM);
-
- rc = qed_spq_post(p_hwfn, p_ent, NULL);
-
-out:
- qed_rdma_free(p_hwfn);
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA stop done, rc = %d\n", rc);
- return rc;
-}
-
-static int qed_rdma_add_user(void *rdma_cxt,
- struct qed_rdma_add_user_out_params *out_params)
-{
- struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
- u32 dpi_start_offset;
- u32 returned_id = 0;
- int rc;
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Adding User\n");
-
- /* Allocate DPI */
- spin_lock_bh(&p_hwfn->p_rdma_info->lock);
- rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_hwfn->p_rdma_info->dpi_map,
- &returned_id);
- spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
-
- out_params->dpi = (u16)returned_id;
-
- /* Calculate the corresponding DPI address */
- dpi_start_offset = p_hwfn->dpi_start_offset;
-
- out_params->dpi_addr = (u64)((u8 __iomem *)p_hwfn->doorbells +
- dpi_start_offset +
- ((out_params->dpi) * p_hwfn->dpi_size));
-
- out_params->dpi_phys_addr = p_hwfn->cdev->db_phys_addr +
- dpi_start_offset +
- ((out_params->dpi) * p_hwfn->dpi_size);
-
- out_params->dpi_size = p_hwfn->dpi_size;
- out_params->wid_count = p_hwfn->wid_count;
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Adding user - done, rc = %d\n", rc);
- return rc;
-}
-
-static struct qed_rdma_port *qed_rdma_query_port(void *rdma_cxt)
-{
- struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
- struct qed_rdma_port *p_port = p_hwfn->p_rdma_info->port;
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA Query port\n");
-
- /* Link may have changed */
- p_port->port_state = p_hwfn->mcp_info->link_output.link_up ?
- QED_RDMA_PORT_UP : QED_RDMA_PORT_DOWN;
-
- p_port->link_speed = p_hwfn->mcp_info->link_output.speed;
-
- p_port->max_msg_size = RDMA_MAX_DATA_SIZE_IN_WQE;
-
- return p_port;
-}
-
-static struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt)
-{
- struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Query device\n");
-
- /* Return struct with device parameters */
- return p_hwfn->p_rdma_info->dev;
-}
-
-static void qed_rdma_free_tid(void *rdma_cxt, u32 itid)
-{
- struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", itid);
-
- spin_lock_bh(&p_hwfn->p_rdma_info->lock);
- qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->tid_map, itid);
- spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
-}
-
-static void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 qz_offset, u16 prod)
-{
- struct qed_hwfn *p_hwfn;
- u16 qz_num;
- u32 addr;
-
- p_hwfn = (struct qed_hwfn *)rdma_cxt;
-
- if (qz_offset > p_hwfn->p_rdma_info->max_queue_zones) {
- DP_NOTICE(p_hwfn,
- "queue zone offset %d is too large (max is %d)\n",
- qz_offset, p_hwfn->p_rdma_info->max_queue_zones);
- return;
- }
-
- qz_num = p_hwfn->p_rdma_info->queue_zone_base + qz_offset;
- addr = GTT_BAR0_MAP_REG_USDM_RAM +
- USTORM_COMMON_QUEUE_CONS_OFFSET(qz_num);
-
- REG_WR16(p_hwfn, addr, prod);
-
- /* keep prod updates ordered */
- wmb();
-}
-
-static int qed_fill_rdma_dev_info(struct qed_dev *cdev,
- struct qed_dev_rdma_info *info)
-{
- struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
-
- memset(info, 0, sizeof(*info));
-
- info->rdma_type = QED_RDMA_TYPE_ROCE;
- info->user_dpm_enabled = (p_hwfn->db_bar_no_edpm == 0);
-
- qed_fill_dev_info(cdev, &info->common);
-
- return 0;
-}
-
-static int qed_rdma_get_sb_start(struct qed_dev *cdev)
-{
- int feat_num;
-
- if (cdev->num_hwfns > 1)
- feat_num = FEAT_NUM(QED_LEADING_HWFN(cdev), QED_PF_L2_QUE);
- else
- feat_num = FEAT_NUM(QED_LEADING_HWFN(cdev), QED_PF_L2_QUE) *
- cdev->num_hwfns;
-
- return feat_num;
-}
-
-static int qed_rdma_get_min_cnq_msix(struct qed_dev *cdev)
-{
- int n_cnq = FEAT_NUM(QED_LEADING_HWFN(cdev), QED_RDMA_CNQ);
- int n_msix = cdev->int_params.rdma_msix_cnt;
-
- return min_t(int, n_cnq, n_msix);
-}
-
-static int qed_rdma_set_int(struct qed_dev *cdev, u16 cnt)
-{
- int limit = 0;
-
- /* Mark the fastpath as free/used */
- cdev->int_params.fp_initialized = cnt ? true : false;
-
- if (cdev->int_params.out.int_mode != QED_INT_MODE_MSIX) {
- DP_ERR(cdev,
- "qed roce supports only MSI-X interrupts (detected %d).\n",
- cdev->int_params.out.int_mode);
- return -EINVAL;
- } else if (cdev->int_params.fp_msix_cnt) {
- limit = cdev->int_params.rdma_msix_cnt;
- }
-
- if (!limit)
- return -ENOMEM;
-
- return min_t(int, cnt, limit);
-}
-
-static int qed_rdma_get_int(struct qed_dev *cdev, struct qed_int_info *info)
-{
- memset(info, 0, sizeof(*info));
-
- if (!cdev->int_params.fp_initialized) {
- DP_INFO(cdev,
- "Protocol driver requested interrupt information, but its support is not yet configured\n");
- return -EINVAL;
- }
-
- if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
- int msix_base = cdev->int_params.rdma_msix_base;
-
- info->msix_cnt = cdev->int_params.rdma_msix_cnt;
- info->msix = &cdev->int_params.msix_table[msix_base];
-
- DP_VERBOSE(cdev, QED_MSG_RDMA, "msix_cnt = %d msix_base=%d\n",
- info->msix_cnt, msix_base);
- }
-
- return 0;
-}
-
-static int qed_rdma_alloc_pd(void *rdma_cxt, u16 *pd)
-{
- struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
- u32 returned_id;
- int rc;
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc PD\n");
-
- /* Allocates an unused protection domain */
- spin_lock_bh(&p_hwfn->p_rdma_info->lock);
- rc = qed_rdma_bmap_alloc_id(p_hwfn,
- &p_hwfn->p_rdma_info->pd_map, &returned_id);
- spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
-
- *pd = (u16)returned_id;
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc PD - done, rc = %d\n", rc);
- return rc;
-}
-
-static void qed_rdma_free_pd(void *rdma_cxt, u16 pd)
-{
- struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "pd = %08x\n", pd);
-
- /* Returns a previously allocated protection domain for reuse */
- spin_lock_bh(&p_hwfn->p_rdma_info->lock);
- qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->pd_map, pd);
- spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
-}
-
-static enum qed_rdma_toggle_bit
-qed_rdma_toggle_bit_create_resize_cq(struct qed_hwfn *p_hwfn, u16 icid)
-{
- struct qed_rdma_info *p_info = p_hwfn->p_rdma_info;
- enum qed_rdma_toggle_bit toggle_bit;
- u32 bmap_id;
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", icid);
-
- /* the function toggle the bit that is related to a given icid
- * and returns the new toggle bit's value
- */
- bmap_id = icid - qed_cxt_get_proto_cid_start(p_hwfn, p_info->proto);
-
- spin_lock_bh(&p_info->lock);
- toggle_bit = !test_and_change_bit(bmap_id,
- p_info->toggle_bits.bitmap);
- spin_unlock_bh(&p_info->lock);
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QED_RDMA_TOGGLE_BIT_= %d\n",
- toggle_bit);
-
- return toggle_bit;
-}
-
-static int qed_rdma_create_cq(void *rdma_cxt,
- struct qed_rdma_create_cq_in_params *params,
- u16 *icid)
-{
- struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
- struct qed_rdma_info *p_info = p_hwfn->p_rdma_info;
- struct rdma_create_cq_ramrod_data *p_ramrod;
- enum qed_rdma_toggle_bit toggle_bit;
- struct qed_sp_init_data init_data;
- struct qed_spq_entry *p_ent;
- u32 returned_id, start_cid;
- int rc;
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "cq_handle = %08x%08x\n",
- params->cq_handle_hi, params->cq_handle_lo);
-
- /* Allocate icid */
- spin_lock_bh(&p_info->lock);
- rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_info->cq_map, &returned_id);
- spin_unlock_bh(&p_info->lock);
-
- if (rc) {
- DP_NOTICE(p_hwfn, "Can't create CQ, rc = %d\n", rc);
- return rc;
- }
-
- start_cid = qed_cxt_get_proto_cid_start(p_hwfn,
- p_info->proto);
- *icid = returned_id + start_cid;
-
- /* Check if icid requires a page allocation */
- rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, *icid);
- if (rc)
- goto err;
-
- /* Get SPQ entry */
- memset(&init_data, 0, sizeof(init_data));
- init_data.cid = *icid;
- init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
- init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
-
- /* Send create CQ ramrod */
- rc = qed_sp_init_request(p_hwfn, &p_ent,
- RDMA_RAMROD_CREATE_CQ,
- p_info->proto, &init_data);
- if (rc)
- goto err;
-
- p_ramrod = &p_ent->ramrod.rdma_create_cq;
-
- p_ramrod->cq_handle.hi = cpu_to_le32(params->cq_handle_hi);
- p_ramrod->cq_handle.lo = cpu_to_le32(params->cq_handle_lo);
- p_ramrod->dpi = cpu_to_le16(params->dpi);
- p_ramrod->is_two_level_pbl = params->pbl_two_level;
- p_ramrod->max_cqes = cpu_to_le32(params->cq_size);
- DMA_REGPAIR_LE(p_ramrod->pbl_addr, params->pbl_ptr);
- p_ramrod->pbl_num_pages = cpu_to_le16(params->pbl_num_pages);
- p_ramrod->cnq_id = (u8)RESC_START(p_hwfn, QED_RDMA_CNQ_RAM) +
- params->cnq_id;
- p_ramrod->int_timeout = params->int_timeout;
-
- /* toggle the bit for every resize or create cq for a given icid */
- toggle_bit = qed_rdma_toggle_bit_create_resize_cq(p_hwfn, *icid);
-
- p_ramrod->toggle_bit = toggle_bit;
-
- rc = qed_spq_post(p_hwfn, p_ent, NULL);
- if (rc) {
- /* restore toggle bit */
- qed_rdma_toggle_bit_create_resize_cq(p_hwfn, *icid);
- goto err;
- }
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Created CQ, rc = %d\n", rc);
- return rc;
-
-err:
- /* release allocated icid */
- spin_lock_bh(&p_info->lock);
- qed_bmap_release_id(p_hwfn, &p_info->cq_map, returned_id);
- spin_unlock_bh(&p_info->lock);
- DP_NOTICE(p_hwfn, "Create CQ failed, rc = %d\n", rc);
-
- return rc;
-}
-
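/* Sketch of the id <-> icid mapping used above and mirrored in the
 * error and destroy paths: the bitmap tracks zero-based ids, and only
 * ramrods see absolute icids.
 *
 *   icid    = returned_id + start_cid;  // allocation
 *   bmap_id = icid - start_cid;         // release / toggle lookup
 */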
-static int
-qed_rdma_destroy_cq(void *rdma_cxt,
- struct qed_rdma_destroy_cq_in_params *in_params,
- struct qed_rdma_destroy_cq_out_params *out_params)
-{
- struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
- struct rdma_destroy_cq_output_params *p_ramrod_res;
- struct rdma_destroy_cq_ramrod_data *p_ramrod;
- struct qed_sp_init_data init_data;
- struct qed_spq_entry *p_ent;
- dma_addr_t ramrod_res_phys;
- int rc = -ENOMEM;
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", in_params->icid);
-
- p_ramrod_res = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
- sizeof(struct rdma_destroy_cq_output_params),
- &ramrod_res_phys, GFP_KERNEL);
- if (!p_ramrod_res) {
- DP_NOTICE(p_hwfn,
- "qed destroy cq failed: cannot allocate memory (ramrod)\n");
- return rc;
- }
-
- /* Get SPQ entry */
- memset(&init_data, 0, sizeof(init_data));
- init_data.cid = in_params->icid;
- init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
- init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
-
- /* Send destroy CQ ramrod */
- rc = qed_sp_init_request(p_hwfn, &p_ent,
- RDMA_RAMROD_DESTROY_CQ,
- p_hwfn->p_rdma_info->proto, &init_data);
- if (rc)
- goto err;
-
- p_ramrod = &p_ent->ramrod.rdma_destroy_cq;
- DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);
-
- rc = qed_spq_post(p_hwfn, p_ent, NULL);
- if (rc)
- goto err;
-
- out_params->num_cq_notif = le16_to_cpu(p_ramrod_res->cnq_num);
-
- dma_free_coherent(&p_hwfn->cdev->pdev->dev,
- sizeof(struct rdma_destroy_cq_output_params),
- p_ramrod_res, ramrod_res_phys);
-
- /* Free icid */
- spin_lock_bh(&p_hwfn->p_rdma_info->lock);
-
- qed_bmap_release_id(p_hwfn,
- &p_hwfn->p_rdma_info->cq_map,
- (in_params->icid -
- qed_cxt_get_proto_cid_start(p_hwfn,
- p_hwfn->p_rdma_info->proto)));
-
- spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Destroyed CQ, rc = %d\n", rc);
- return rc;
-
-err:
- dma_free_coherent(&p_hwfn->cdev->pdev->dev,
- sizeof(struct rdma_destroy_cq_output_params),
- p_ramrod_res, ramrod_res_phys);
-
- return rc;
-}
-
-static void qed_rdma_set_fw_mac(u16 *p_fw_mac, u8 *p_qed_mac)
-{
- p_fw_mac[0] = cpu_to_le16((p_qed_mac[0] << 8) + p_qed_mac[1]);
- p_fw_mac[1] = cpu_to_le16((p_qed_mac[2] << 8) + p_qed_mac[3]);
- p_fw_mac[2] = cpu_to_le16((p_qed_mac[4] << 8) + p_qed_mac[5]);
+ qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_ROCE);
}
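/* Worked example for the 16-bit packing in the qed_rdma_set_fw_mac()
 * removed above: for MAC 00:11:22:33:44:55, the words before the
 * cpu_to_le16() conversion are 0x0011, 0x2233 and 0x4455, i.e. the
 * firmware consumes the MAC as three byte-pair half-words, each stored
 * little-endian.
 */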
static void qed_rdma_copy_gids(struct qed_rdma_qp *qp, __le32 *src_gid,
@@ -1210,7 +156,7 @@ void qed_roce_free_cid_pair(struct qed_hwfn *p_hwfn, u16 cid)
spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}
-static int qed_roce_alloc_cid(struct qed_hwfn *p_hwfn, u16 *cid)
+int qed_roce_alloc_cid(struct qed_hwfn *p_hwfn, u16 *cid)
{
struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
u32 responder_icid;
@@ -1870,9 +816,9 @@ err:
return rc;
}
-static int qed_roce_query_qp(struct qed_hwfn *p_hwfn,
- struct qed_rdma_qp *qp,
- struct qed_rdma_query_qp_out_params *out_params)
+int qed_roce_query_qp(struct qed_hwfn *p_hwfn,
+ struct qed_rdma_qp *qp,
+ struct qed_rdma_query_qp_out_params *out_params)
{
struct roce_query_qp_resp_output_params *p_resp_ramrod_res;
struct roce_query_qp_req_output_params *p_req_ramrod_res;
@@ -2011,7 +957,7 @@ err_resp:
return rc;
}
-static int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
+int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
{
u32 num_invalidated_mw = 0;
u32 num_bound_mw = 0;
@@ -2050,138 +996,10 @@ static int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
return 0;
}
-static int qed_rdma_query_qp(void *rdma_cxt,
- struct qed_rdma_qp *qp,
- struct qed_rdma_query_qp_out_params *out_params)
-{
- struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
- int rc;
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
-
- /* The following fields are filled in from qp and not FW as they can't
- * be modified by FW
- */
- out_params->mtu = qp->mtu;
- out_params->dest_qp = qp->dest_qp;
- out_params->incoming_atomic_en = qp->incoming_atomic_en;
- out_params->e2e_flow_control_en = qp->e2e_flow_control_en;
- out_params->incoming_rdma_read_en = qp->incoming_rdma_read_en;
- out_params->incoming_rdma_write_en = qp->incoming_rdma_write_en;
- out_params->dgid = qp->dgid;
- out_params->flow_label = qp->flow_label;
- out_params->hop_limit_ttl = qp->hop_limit_ttl;
- out_params->traffic_class_tos = qp->traffic_class_tos;
- out_params->timeout = qp->ack_timeout;
- out_params->rnr_retry = qp->rnr_retry_cnt;
- out_params->retry_cnt = qp->retry_cnt;
- out_params->min_rnr_nak_timer = qp->min_rnr_nak_timer;
- out_params->pkey_index = 0;
- out_params->max_rd_atomic = qp->max_rd_atomic_req;
- out_params->max_dest_rd_atomic = qp->max_rd_atomic_resp;
- out_params->sqd_async = qp->sqd_async;
-
- rc = qed_roce_query_qp(p_hwfn, qp, out_params);
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Query QP, rc = %d\n", rc);
- return rc;
-}
-
-static int qed_rdma_destroy_qp(void *rdma_cxt, struct qed_rdma_qp *qp)
-{
- struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
- int rc = 0;
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
-
- rc = qed_roce_destroy_qp(p_hwfn, qp);
-
- /* free qp params struct */
- kfree(qp);
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP destroyed\n");
- return rc;
-}
-
-static struct qed_rdma_qp *
-qed_rdma_create_qp(void *rdma_cxt,
- struct qed_rdma_create_qp_in_params *in_params,
- struct qed_rdma_create_qp_out_params *out_params)
-{
- struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
- struct qed_rdma_qp *qp;
- u8 max_stats_queues;
- int rc;
-
- if (!rdma_cxt || !in_params || !out_params || !p_hwfn->p_rdma_info) {
- DP_ERR(p_hwfn->cdev,
- "qed roce create qp failed due to NULL entry (rdma_cxt=%p, in=%p, out=%p, roce_info=?\n",
- rdma_cxt, in_params, out_params);
- return NULL;
- }
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
- "qed rdma create qp called with qp_handle = %08x%08x\n",
- in_params->qp_handle_hi, in_params->qp_handle_lo);
-
- /* Some sanity checks... */
- max_stats_queues = p_hwfn->p_rdma_info->dev->max_stats_queues;
- if (in_params->stats_queue >= max_stats_queues) {
- DP_ERR(p_hwfn->cdev,
- "qed rdma create qp failed due to invalid statistics queue %d. maximum is %d\n",
- in_params->stats_queue, max_stats_queues);
- return NULL;
- }
-
- qp = kzalloc(sizeof(*qp), GFP_KERNEL);
- if (!qp) {
- DP_NOTICE(p_hwfn, "Failed to allocate qed_rdma_qp\n");
- return NULL;
- }
-
- rc = qed_roce_alloc_cid(p_hwfn, &qp->icid);
- qp->qpid = ((0xFF << 16) | qp->icid);
-
- DP_INFO(p_hwfn, "ROCE qpid=%x\n", qp->qpid);
-
- if (rc) {
- kfree(qp);
- return NULL;
- }
-
- qp->cur_state = QED_ROCE_QP_STATE_RESET;
- qp->qp_handle.hi = cpu_to_le32(in_params->qp_handle_hi);
- qp->qp_handle.lo = cpu_to_le32(in_params->qp_handle_lo);
- qp->qp_handle_async.hi = cpu_to_le32(in_params->qp_handle_async_hi);
- qp->qp_handle_async.lo = cpu_to_le32(in_params->qp_handle_async_lo);
- qp->use_srq = in_params->use_srq;
- qp->signal_all = in_params->signal_all;
- qp->fmr_and_reserved_lkey = in_params->fmr_and_reserved_lkey;
- qp->pd = in_params->pd;
- qp->dpi = in_params->dpi;
- qp->sq_cq_id = in_params->sq_cq_id;
- qp->sq_num_pages = in_params->sq_num_pages;
- qp->sq_pbl_ptr = in_params->sq_pbl_ptr;
- qp->rq_cq_id = in_params->rq_cq_id;
- qp->rq_num_pages = in_params->rq_num_pages;
- qp->rq_pbl_ptr = in_params->rq_pbl_ptr;
- qp->srq_id = in_params->srq_id;
- qp->req_offloaded = false;
- qp->resp_offloaded = false;
- qp->e2e_flow_control_en = !qp->use_srq;
- qp->stats_queue = in_params->stats_queue;
-
- out_params->icid = qp->icid;
- out_params->qp_id = qp->qpid;
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Create QP, rc = %d\n", rc);
- return qp;
-}
-
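/* Worked example for the qpid construction removed above: with
 * icid = 0x0123, qpid = (0xFF << 16) | 0x0123 = 0x00ff0123; the
 * constant 0xFF occupies bits 16..23 as a fixed prefix on RoCE QP ids.
 */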
-static int qed_roce_modify_qp(struct qed_hwfn *p_hwfn,
- struct qed_rdma_qp *qp,
- enum qed_roce_qp_state prev_state,
- struct qed_rdma_modify_qp_in_params *params)
+int qed_roce_modify_qp(struct qed_hwfn *p_hwfn,
+ struct qed_rdma_qp *qp,
+ enum qed_roce_qp_state prev_state,
+ struct qed_rdma_modify_qp_in_params *params)
{
u32 num_invalidated_mw = 0, num_bound_mw = 0;
int rc = 0;
@@ -2286,331 +1104,6 @@ static int qed_roce_modify_qp(struct qed_hwfn *p_hwfn,
return rc;
}
-static int qed_rdma_modify_qp(void *rdma_cxt,
- struct qed_rdma_qp *qp,
- struct qed_rdma_modify_qp_in_params *params)
-{
- struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
- enum qed_roce_qp_state prev_state;
- int rc = 0;
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x params->new_state=%d\n",
- qp->icid, params->new_state);
-
- if (GET_FIELD(params->modify_flags,
- QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN)) {
- qp->incoming_rdma_read_en = params->incoming_rdma_read_en;
- qp->incoming_rdma_write_en = params->incoming_rdma_write_en;
- qp->incoming_atomic_en = params->incoming_atomic_en;
- }
-
- /* Update QP structure with the updated values */
- if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_ROCE_MODE))
- qp->roce_mode = params->roce_mode;
- if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY))
- qp->pkey = params->pkey;
- if (GET_FIELD(params->modify_flags,
- QED_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN))
- qp->e2e_flow_control_en = params->e2e_flow_control_en;
- if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_DEST_QP))
- qp->dest_qp = params->dest_qp;
- if (GET_FIELD(params->modify_flags,
- QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR)) {
- /* Indicates that the following parameters have changed:
- * Traffic class, flow label, hop limit, source GID,
- * destination GID, loopback indicator
- */
- qp->traffic_class_tos = params->traffic_class_tos;
- qp->flow_label = params->flow_label;
- qp->hop_limit_ttl = params->hop_limit_ttl;
-
- qp->sgid = params->sgid;
- qp->dgid = params->dgid;
- qp->udp_src_port = 0;
- qp->vlan_id = params->vlan_id;
- qp->mtu = params->mtu;
- qp->lb_indication = params->lb_indication;
- memcpy((u8 *)&qp->remote_mac_addr[0],
- (u8 *)&params->remote_mac_addr[0], ETH_ALEN);
- if (params->use_local_mac) {
- memcpy((u8 *)&qp->local_mac_addr[0],
- (u8 *)&params->local_mac_addr[0], ETH_ALEN);
- } else {
- memcpy((u8 *)&qp->local_mac_addr[0],
- (u8 *)&p_hwfn->hw_info.hw_mac_addr, ETH_ALEN);
- }
- }
- if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_RQ_PSN))
- qp->rq_psn = params->rq_psn;
- if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_SQ_PSN))
- qp->sq_psn = params->sq_psn;
- if (GET_FIELD(params->modify_flags,
- QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ))
- qp->max_rd_atomic_req = params->max_rd_atomic_req;
- if (GET_FIELD(params->modify_flags,
- QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP))
- qp->max_rd_atomic_resp = params->max_rd_atomic_resp;
- if (GET_FIELD(params->modify_flags,
- QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT))
- qp->ack_timeout = params->ack_timeout;
- if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_RETRY_CNT))
- qp->retry_cnt = params->retry_cnt;
- if (GET_FIELD(params->modify_flags,
- QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT))
- qp->rnr_retry_cnt = params->rnr_retry_cnt;
- if (GET_FIELD(params->modify_flags,
- QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER))
- qp->min_rnr_nak_timer = params->min_rnr_nak_timer;
-
- qp->sqd_async = params->sqd_async;
-
- prev_state = qp->cur_state;
- if (GET_FIELD(params->modify_flags,
- QED_RDMA_MODIFY_QP_VALID_NEW_STATE)) {
- qp->cur_state = params->new_state;
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "qp->cur_state=%d\n",
- qp->cur_state);
- }
-
- rc = qed_roce_modify_qp(p_hwfn, qp, prev_state, params);
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify QP, rc = %d\n", rc);
- return rc;
-}
-
-static int
-qed_rdma_register_tid(void *rdma_cxt,
- struct qed_rdma_register_tid_in_params *params)
-{
- struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
- struct rdma_register_tid_ramrod_data *p_ramrod;
- struct qed_sp_init_data init_data;
- struct qed_spq_entry *p_ent;
- enum rdma_tid_type tid_type;
- u8 fw_return_code;
- int rc;
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", params->itid);
-
- /* Get SPQ entry */
- memset(&init_data, 0, sizeof(init_data));
- init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
- init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
-
- rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_REGISTER_MR,
- p_hwfn->p_rdma_info->proto, &init_data);
- if (rc) {
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
- return rc;
- }
-
- if (p_hwfn->p_rdma_info->last_tid < params->itid)
- p_hwfn->p_rdma_info->last_tid = params->itid;
-
- p_ramrod = &p_ent->ramrod.rdma_register_tid;
-
- p_ramrod->flags = 0;
- SET_FIELD(p_ramrod->flags,
- RDMA_REGISTER_TID_RAMROD_DATA_TWO_LEVEL_PBL,
- params->pbl_two_level);
-
- SET_FIELD(p_ramrod->flags,
- RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED, params->zbva);
-
- SET_FIELD(p_ramrod->flags,
- RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR, params->phy_mr);
-
- /* Don't initialize D/C field, as it may override other bits. */
- if (params->tid_type != QED_RDMA_TID_FMR && !params->dma_mr)
- SET_FIELD(p_ramrod->flags,
- RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG,
- params->page_size_log - 12);
-
- SET_FIELD(p_ramrod->flags,
- RDMA_REGISTER_TID_RAMROD_DATA_MAX_ID,
- p_hwfn->p_rdma_info->last_tid);
-
- SET_FIELD(p_ramrod->flags,
- RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ,
- params->remote_read);
-
- SET_FIELD(p_ramrod->flags,
- RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE,
- params->remote_write);
-
- SET_FIELD(p_ramrod->flags,
- RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_ATOMIC,
- params->remote_atomic);
-
- SET_FIELD(p_ramrod->flags,
- RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE,
- params->local_write);
-
- SET_FIELD(p_ramrod->flags,
- RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ, params->local_read);
-
- SET_FIELD(p_ramrod->flags,
- RDMA_REGISTER_TID_RAMROD_DATA_ENABLE_MW_BIND,
- params->mw_bind);
-
- SET_FIELD(p_ramrod->flags1,
- RDMA_REGISTER_TID_RAMROD_DATA_PBL_PAGE_SIZE_LOG,
- params->pbl_page_size_log - 12);
-
- SET_FIELD(p_ramrod->flags2,
- RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR, params->dma_mr);
-
- switch (params->tid_type) {
- case QED_RDMA_TID_REGISTERED_MR:
- tid_type = RDMA_TID_REGISTERED_MR;
- break;
- case QED_RDMA_TID_FMR:
- tid_type = RDMA_TID_FMR;
- break;
- case QED_RDMA_TID_MW_TYPE1:
- tid_type = RDMA_TID_MW_TYPE1;
- break;
- case QED_RDMA_TID_MW_TYPE2A:
- tid_type = RDMA_TID_MW_TYPE2A;
- break;
- default:
- rc = -EINVAL;
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
- return rc;
- }
- SET_FIELD(p_ramrod->flags1,
- RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE, tid_type);
-
- p_ramrod->itid = cpu_to_le32(params->itid);
- p_ramrod->key = params->key;
- p_ramrod->pd = cpu_to_le16(params->pd);
- p_ramrod->length_hi = (u8)(params->length >> 32);
- p_ramrod->length_lo = DMA_LO_LE(params->length);
- if (params->zbva) {
- /* Lower 32 bits of the registered MR address.
- * In case of zero based MR, will hold FBO
- */
- p_ramrod->va.hi = 0;
- p_ramrod->va.lo = cpu_to_le32(params->fbo);
- } else {
- DMA_REGPAIR_LE(p_ramrod->va, params->vaddr);
- }
- DMA_REGPAIR_LE(p_ramrod->pbl_base, params->pbl_ptr);
-
- /* DIF */
- if (params->dif_enabled) {
- SET_FIELD(p_ramrod->flags2,
- RDMA_REGISTER_TID_RAMROD_DATA_DIF_ON_HOST_FLG, 1);
- DMA_REGPAIR_LE(p_ramrod->dif_error_addr,
- params->dif_error_addr);
- DMA_REGPAIR_LE(p_ramrod->dif_runt_addr, params->dif_runt_addr);
- }
-
- rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
- if (rc)
- return rc;
-
- if (fw_return_code != RDMA_RETURN_OK) {
- DP_NOTICE(p_hwfn, "fw_return_code = %d\n", fw_return_code);
- return -EINVAL;
- }
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Register TID, rc = %d\n", rc);
- return rc;
-}
-
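/* The PAGE_SIZE_LOG fields set above carry log2(page size) biased by
 * 12 (log2 of 4 KB), so a 4 KB page encodes as 0 and a 2 MB page
 * (log2 = 21) encodes as 9. A sanity sketch, assuming power-of-two
 * page sizes (helper name is illustrative):
 */
static inline u8 example_page_size_field(u32 page_size_log)
{
	return (u8)(page_size_log - 12);	/* 12 == log2(4096) */
}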
-static int qed_rdma_deregister_tid(void *rdma_cxt, u32 itid)
-{
- struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
- struct rdma_deregister_tid_ramrod_data *p_ramrod;
- struct qed_sp_init_data init_data;
- struct qed_spq_entry *p_ent;
- struct qed_ptt *p_ptt;
- u8 fw_return_code;
- int rc;
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", itid);
-
- /* Get SPQ entry */
- memset(&init_data, 0, sizeof(init_data));
- init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
- init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
-
- rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_DEREGISTER_MR,
- p_hwfn->p_rdma_info->proto, &init_data);
- if (rc) {
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
- return rc;
- }
-
- p_ramrod = &p_ent->ramrod.rdma_deregister_tid;
- p_ramrod->itid = cpu_to_le32(itid);
-
- rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
- if (rc) {
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
- return rc;
- }
-
- if (fw_return_code == RDMA_RETURN_DEREGISTER_MR_BAD_STATE_ERR) {
- DP_NOTICE(p_hwfn, "fw_return_code = %d\n", fw_return_code);
- return -EINVAL;
- } else if (fw_return_code == RDMA_RETURN_NIG_DRAIN_REQ) {
- /* Bit indicating that the TID is in use and a NIG drain is
- * required before sending the ramrod again
- */
- p_ptt = qed_ptt_acquire(p_hwfn);
- if (!p_ptt) {
- rc = -EBUSY;
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
- "Failed to acquire PTT\n");
- return rc;
- }
-
- rc = qed_mcp_drain(p_hwfn, p_ptt);
- if (rc) {
- qed_ptt_release(p_hwfn, p_ptt);
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
- "Drain failed\n");
- return rc;
- }
-
- qed_ptt_release(p_hwfn, p_ptt);
-
- /* Resend the ramrod */
- rc = qed_sp_init_request(p_hwfn, &p_ent,
- RDMA_RAMROD_DEREGISTER_MR,
- p_hwfn->p_rdma_info->proto,
- &init_data);
- if (rc) {
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
- "Failed to init sp-element\n");
- return rc;
- }
-
- rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
- if (rc) {
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
- "Ramrod failed\n");
- return rc;
- }
-
- if (fw_return_code != RDMA_RETURN_OK) {
- DP_NOTICE(p_hwfn, "fw_return_code = %d\n",
- fw_return_code);
- return rc;
- }
- }
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "De-registered TID, rc = %d\n", rc);
- return rc;
-}
-
static void qed_roce_free_real_icid(struct qed_hwfn *p_hwfn, u16 icid)
{
struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
@@ -2636,414 +1129,43 @@ static void qed_roce_free_real_icid(struct qed_hwfn *p_hwfn, u16 icid)
spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}
-static void *qed_rdma_get_rdma_ctx(struct qed_dev *cdev)
+void qed_roce_dpm_dcbx(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
- return QED_LEADING_HWFN(cdev);
-}
+ u8 val;
-static void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
-{
- u32 val;
-
- val = (p_hwfn->dcbx_no_edpm || p_hwfn->db_bar_no_edpm) ? 0 : 1;
-
- qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_DPM_ENABLE, val);
- DP_VERBOSE(p_hwfn, (QED_MSG_DCB | QED_MSG_RDMA),
- "Changing DPM_EN state to %d (DCBX=%d, DB_BAR=%d)\n",
- val, p_hwfn->dcbx_no_edpm, p_hwfn->db_bar_no_edpm);
-}
-
-void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
-{
- p_hwfn->db_bar_no_edpm = true;
+ /* If any QPs are already active, disable DPM, since their context
+ * was built from data that predates the latest DCBx update.
+ * Otherwise enable it.
+ */
+ val = qed_rdma_allocated_qps(p_hwfn) ? 1 : 0;
+ p_hwfn->dcbx_no_edpm = (u8)val;
qed_rdma_dpm_conf(p_hwfn, p_ptt);
}
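/* A hedged usage sketch: the natural caller is a DCBx notification
 * path, re-evaluating eDPM once the new configuration has landed (the
 * wrapper name below is hypothetical):
 */
static void example_dcbx_mib_updated(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt)
{
	/* Active QPs still carry pre-update context, so DPM is held off */
	qed_roce_dpm_dcbx(p_hwfn, p_ptt);
}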
-static int qed_rdma_start(void *rdma_cxt,
- struct qed_rdma_start_in_params *params)
-{
- struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
- struct qed_ptt *p_ptt;
- int rc = -EBUSY;
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
- "desired_cnq = %08x\n", params->desired_cnq);
-
- p_ptt = qed_ptt_acquire(p_hwfn);
- if (!p_ptt)
- goto err;
-
- rc = qed_rdma_alloc(p_hwfn, p_ptt, params);
- if (rc)
- goto err1;
-
- rc = qed_rdma_setup(p_hwfn, p_ptt, params);
- if (rc)
- goto err2;
-
- qed_ptt_release(p_hwfn, p_ptt);
-
- return rc;
-
-err2:
- qed_rdma_free(p_hwfn);
-err1:
- qed_ptt_release(p_hwfn, p_ptt);
-err:
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA start - error, rc = %d\n", rc);
- return rc;
-}
-
-static int qed_rdma_init(struct qed_dev *cdev,
- struct qed_rdma_start_in_params *params)
+int qed_roce_setup(struct qed_hwfn *p_hwfn)
{
- return qed_rdma_start(QED_LEADING_HWFN(cdev), params);
-}
-
-static void qed_rdma_remove_user(void *rdma_cxt, u16 dpi)
-{
- struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "dpi = %08x\n", dpi);
-
- spin_lock_bh(&p_hwfn->p_rdma_info->lock);
- qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->dpi_map, dpi);
- spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
-}
-
-void qed_ll2b_complete_tx_gsi_packet(struct qed_hwfn *p_hwfn,
- u8 connection_handle,
- void *cookie,
- dma_addr_t first_frag_addr,
- bool b_last_fragment, bool b_last_packet)
-{
- struct qed_roce_ll2_packet *packet = cookie;
- struct qed_roce_ll2_info *roce_ll2 = p_hwfn->ll2;
-
- roce_ll2->cbs.tx_cb(roce_ll2->cb_cookie, packet);
+ return qed_spq_register_async_cb(p_hwfn, PROTOCOLID_ROCE,
+ qed_roce_async_event);
}
-void qed_ll2b_release_tx_gsi_packet(struct qed_hwfn *p_hwfn,
- u8 connection_handle,
- void *cookie,
- dma_addr_t first_frag_addr,
- bool b_last_fragment, bool b_last_packet)
+int qed_roce_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
- qed_ll2b_complete_tx_gsi_packet(p_hwfn, connection_handle,
- cookie, first_frag_addr,
- b_last_fragment, b_last_packet);
-}
-
-void qed_ll2b_complete_rx_gsi_packet(struct qed_hwfn *p_hwfn,
- u8 connection_handle,
- void *cookie,
- dma_addr_t rx_buf_addr,
- u16 data_length,
- u8 data_length_error,
- u16 parse_flags,
- u16 vlan,
- u32 src_mac_addr_hi,
- u16 src_mac_addr_lo, bool b_last_packet)
-{
- struct qed_roce_ll2_info *roce_ll2 = p_hwfn->ll2;
- struct qed_roce_ll2_rx_params params;
- struct qed_dev *cdev = p_hwfn->cdev;
- struct qed_roce_ll2_packet pkt;
-
- DP_VERBOSE(cdev,
- QED_MSG_LL2,
- "roce ll2 rx complete: bus_addr=%p, len=%d, data_len_err=%d\n",
- (void *)(uintptr_t)rx_buf_addr,
- data_length, data_length_error);
-
- memset(&pkt, 0, sizeof(pkt));
- pkt.n_seg = 1;
- pkt.payload[0].baddr = rx_buf_addr;
- pkt.payload[0].len = data_length;
-
- memset(&params, 0, sizeof(params));
- params.vlan_id = vlan;
- *((u32 *)&params.smac[0]) = ntohl(src_mac_addr_hi);
- *((u16 *)&params.smac[4]) = ntohs(src_mac_addr_lo);
-
- if (data_length_error) {
- DP_ERR(cdev,
- "roce ll2 rx complete: data length error %d, length=%d\n",
- data_length_error, data_length);
- params.rc = -EINVAL;
- }
-
- roce_ll2->cbs.rx_cb(roce_ll2->cb_cookie, &pkt, &params);
-}
-
-static int qed_roce_ll2_set_mac_filter(struct qed_dev *cdev,
- u8 *old_mac_address,
- u8 *new_mac_address)
-{
- struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
- struct qed_ptt *p_ptt;
- int rc = 0;
-
- if (!hwfn->ll2 || hwfn->ll2->handle == QED_LL2_UNUSED_HANDLE) {
- DP_ERR(cdev,
- "qed roce mac filter failed - roce_info/ll2 NULL\n");
- return -EINVAL;
- }
-
- p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
- if (!p_ptt) {
- DP_ERR(cdev,
- "qed roce ll2 mac filter set: failed to acquire PTT\n");
- return -EINVAL;
- }
-
- mutex_lock(&hwfn->ll2->lock);
- if (old_mac_address)
- qed_llh_remove_mac_filter(QED_LEADING_HWFN(cdev), p_ptt,
- old_mac_address);
- if (new_mac_address)
- rc = qed_llh_add_mac_filter(QED_LEADING_HWFN(cdev), p_ptt,
- new_mac_address);
- mutex_unlock(&hwfn->ll2->lock);
-
- qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
-
- if (rc)
- DP_ERR(cdev,
- "qed roce ll2 mac filter set: failed to add mac filter\n");
-
- return rc;
-}
-
-static int qed_roce_ll2_start(struct qed_dev *cdev,
- struct qed_roce_ll2_params *params)
-{
- struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
- struct qed_roce_ll2_info *roce_ll2;
- struct qed_ll2_conn ll2_params;
- int rc;
-
- if (!params) {
- DP_ERR(cdev, "qed roce ll2 start: failed due to NULL params\n");
- return -EINVAL;
- }
- if (!params->cbs.tx_cb || !params->cbs.rx_cb) {
- DP_ERR(cdev,
- "qed roce ll2 start: failed due to NULL tx/rx. tx_cb=%p, rx_cb=%p\n",
- params->cbs.tx_cb, params->cbs.rx_cb);
- return -EINVAL;
- }
- if (!is_valid_ether_addr(params->mac_address)) {
- DP_ERR(cdev,
- "qed roce ll2 start: failed due to invalid Ethernet address %pM\n",
- params->mac_address);
- return -EINVAL;
- }
-
- /* Initialize */
- roce_ll2 = kzalloc(sizeof(*roce_ll2), GFP_ATOMIC);
- if (!roce_ll2) {
- DP_ERR(cdev, "qed roce ll2 start: failed memory allocation\n");
- return -ENOMEM;
- }
- roce_ll2->handle = QED_LL2_UNUSED_HANDLE;
- roce_ll2->cbs = params->cbs;
- roce_ll2->cb_cookie = params->cb_cookie;
- mutex_init(&roce_ll2->lock);
-
- memset(&ll2_params, 0, sizeof(ll2_params));
- ll2_params.conn_type = QED_LL2_TYPE_ROCE;
- ll2_params.mtu = params->mtu;
- ll2_params.rx_drop_ttl0_flg = true;
- ll2_params.rx_vlan_removal_en = false;
- ll2_params.tx_dest = CORE_TX_DEST_NW;
- ll2_params.ai_err_packet_too_big = LL2_DROP_PACKET;
- ll2_params.ai_err_no_buf = LL2_DROP_PACKET;
- ll2_params.gsi_enable = true;
-
- rc = qed_ll2_acquire_connection(QED_LEADING_HWFN(cdev), &ll2_params,
- params->max_rx_buffers,
- params->max_tx_buffers,
- &roce_ll2->handle);
- if (rc) {
- DP_ERR(cdev,
- "qed roce ll2 start: failed to acquire LL2 connection (rc=%d)\n",
- rc);
- goto err;
- }
-
- rc = qed_ll2_establish_connection(QED_LEADING_HWFN(cdev),
- roce_ll2->handle);
- if (rc) {
- DP_ERR(cdev,
- "qed roce ll2 start: failed to establish LL2 connection (rc=%d)\n",
- rc);
- goto err1;
- }
-
- hwfn->ll2 = roce_ll2;
-
- rc = qed_roce_ll2_set_mac_filter(cdev, NULL, params->mac_address);
- if (rc) {
- hwfn->ll2 = NULL;
- goto err2;
- }
- ether_addr_copy(roce_ll2->mac_address, params->mac_address);
-
- return 0;
-
-err2:
- qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev), roce_ll2->handle);
-err1:
- qed_ll2_release_connection(QED_LEADING_HWFN(cdev), roce_ll2->handle);
-err:
- kfree(roce_ll2);
- return rc;
-}
-
-static int qed_roce_ll2_stop(struct qed_dev *cdev)
-{
- struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
- struct qed_roce_ll2_info *roce_ll2 = hwfn->ll2;
- int rc;
-
- if (roce_ll2->handle == QED_LL2_UNUSED_HANDLE) {
- DP_ERR(cdev, "qed roce ll2 stop: cannot stop an unused LL2\n");
- return -EINVAL;
- }
-
- /* remove LL2 MAC address filter */
- rc = qed_roce_ll2_set_mac_filter(cdev, roce_ll2->mac_address, NULL);
- eth_zero_addr(roce_ll2->mac_address);
-
- rc = qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev),
- roce_ll2->handle);
- if (rc)
- DP_ERR(cdev,
- "qed roce ll2 stop: failed to terminate LL2 connection (rc=%d)\n",
- rc);
-
- qed_ll2_release_connection(QED_LEADING_HWFN(cdev), roce_ll2->handle);
-
- roce_ll2->handle = QED_LL2_UNUSED_HANDLE;
+ u32 ll2_ethertype_en;
- kfree(roce_ll2);
+ qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, 0);
- return rc;
-}
+ p_hwfn->rdma_prs_search_reg = PRS_REG_SEARCH_ROCE;
-static int qed_roce_ll2_tx(struct qed_dev *cdev,
- struct qed_roce_ll2_packet *pkt,
- struct qed_roce_ll2_tx_params *params)
-{
- struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
- struct qed_roce_ll2_info *roce_ll2 = hwfn->ll2;
- enum qed_ll2_roce_flavor_type qed_roce_flavor;
- u8 flags = 0;
- int rc;
- int i;
+ ll2_ethertype_en = qed_rd(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN);
+ qed_wr(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN,
+ (ll2_ethertype_en | 0x01));
- if (!pkt || !params) {
- DP_ERR(cdev,
- "roce ll2 tx: failed tx because one of the following is NULL - drv=%p, pkt=%p, params=%p\n",
- cdev, pkt, params);
+ if (qed_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_ROCE) % 2) {
+ DP_NOTICE(p_hwfn, "The first RoCE's cid should be even\n");
return -EINVAL;
}
- qed_roce_flavor = (pkt->roce_mode == ROCE_V1) ? QED_LL2_ROCE
- : QED_LL2_RROCE;
-
- if (pkt->roce_mode == ROCE_V2_IPV4)
- flags |= BIT(CORE_TX_BD_DATA_IP_CSUM_SHIFT);
-
- /* Tx header */
- rc = qed_ll2_prepare_tx_packet(QED_LEADING_HWFN(cdev), roce_ll2->handle,
- 1 + pkt->n_seg, 0, flags, 0,
- QED_LL2_TX_DEST_NW,
- qed_roce_flavor, pkt->header.baddr,
- pkt->header.len, pkt, 1);
- if (rc) {
- DP_ERR(cdev, "roce ll2 tx: header failed (rc=%d)\n", rc);
- return QED_ROCE_TX_HEAD_FAILURE;
- }
-
- /* Tx payload */
- for (i = 0; i < pkt->n_seg; i++) {
- rc = qed_ll2_set_fragment_of_tx_packet(QED_LEADING_HWFN(cdev),
- roce_ll2->handle,
- pkt->payload[i].baddr,
- pkt->payload[i].len);
- if (rc) {
- /* If this fails there is not much to do: a partial packet has
- * been posted and we can't free its memory, so we must wait
- * for completion
- */
- DP_ERR(cdev,
- "roce ll2 tx: payload failed (rc=%d)\n", rc);
- return QED_ROCE_TX_FRAG_FAILURE;
- }
- }
-
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Initializing HW - Done\n");
return 0;
}
-
-static int qed_roce_ll2_post_rx_buffer(struct qed_dev *cdev,
- struct qed_roce_ll2_buffer *buf,
- u64 cookie, u8 notify_fw)
-{
- return qed_ll2_post_rx_buffer(QED_LEADING_HWFN(cdev),
- QED_LEADING_HWFN(cdev)->ll2->handle,
- buf->baddr, buf->len,
- (void *)(uintptr_t)cookie, notify_fw);
-}
-
-static int qed_roce_ll2_stats(struct qed_dev *cdev, struct qed_ll2_stats *stats)
-{
- struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
- struct qed_roce_ll2_info *roce_ll2 = hwfn->ll2;
-
- return qed_ll2_get_stats(QED_LEADING_HWFN(cdev),
- roce_ll2->handle, stats);
-}
-
-static const struct qed_rdma_ops qed_rdma_ops_pass = {
- .common = &qed_common_ops_pass,
- .fill_dev_info = &qed_fill_rdma_dev_info,
- .rdma_get_rdma_ctx = &qed_rdma_get_rdma_ctx,
- .rdma_init = &qed_rdma_init,
- .rdma_add_user = &qed_rdma_add_user,
- .rdma_remove_user = &qed_rdma_remove_user,
- .rdma_stop = &qed_rdma_stop,
- .rdma_query_port = &qed_rdma_query_port,
- .rdma_query_device = &qed_rdma_query_device,
- .rdma_get_start_sb = &qed_rdma_get_sb_start,
- .rdma_get_rdma_int = &qed_rdma_get_int,
- .rdma_set_rdma_int = &qed_rdma_set_int,
- .rdma_get_min_cnq_msix = &qed_rdma_get_min_cnq_msix,
- .rdma_cnq_prod_update = &qed_rdma_cnq_prod_update,
- .rdma_alloc_pd = &qed_rdma_alloc_pd,
- .rdma_dealloc_pd = &qed_rdma_free_pd,
- .rdma_create_cq = &qed_rdma_create_cq,
- .rdma_destroy_cq = &qed_rdma_destroy_cq,
- .rdma_create_qp = &qed_rdma_create_qp,
- .rdma_modify_qp = &qed_rdma_modify_qp,
- .rdma_query_qp = &qed_rdma_query_qp,
- .rdma_destroy_qp = &qed_rdma_destroy_qp,
- .rdma_alloc_tid = &qed_rdma_alloc_tid,
- .rdma_free_tid = &qed_rdma_free_tid,
- .rdma_register_tid = &qed_rdma_register_tid,
- .rdma_deregister_tid = &qed_rdma_deregister_tid,
- .roce_ll2_start = &qed_roce_ll2_start,
- .roce_ll2_stop = &qed_roce_ll2_stop,
- .roce_ll2_tx = &qed_roce_ll2_tx,
- .roce_ll2_post_rx_buffer = &qed_roce_ll2_post_rx_buffer,
- .roce_ll2_set_mac_filter = &qed_roce_ll2_set_mac_filter,
- .roce_ll2_stats = &qed_roce_ll2_stats,
-};
-
-const struct qed_rdma_ops *qed_get_rdma_ops(void)
-{
- return &qed_rdma_ops_pass;
-}
-EXPORT_SYMBOL(qed_get_rdma_ops);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.h b/drivers/net/ethernet/qlogic/qed/qed_roce.h
index 9742af516183..f801f39fde61 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_roce.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_roce.h
@@ -32,191 +32,28 @@
#ifndef _QED_ROCE_H
#define _QED_ROCE_H
#include <linux/types.h>
-#include <linux/bitops.h>
-#include <linux/kernel.h>
-#include <linux/list.h>
#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/qed/qed_if.h>
-#include <linux/qed/qed_roce_if.h>
-#include "qed.h"
-#include "qed_dev_api.h"
-#include "qed_hsi.h"
-#include "qed_ll2.h"
-#define QED_RDMA_MAX_FMR (RDMA_MAX_TIDS)
-#define QED_RDMA_MAX_P_KEY (1)
-#define QED_RDMA_MAX_WQE (0x7FFF)
-#define QED_RDMA_MAX_SRQ_WQE_ELEM (0x7FFF)
-#define QED_RDMA_PAGE_SIZE_CAPS (0xFFFFF000)
-#define QED_RDMA_ACK_DELAY (15)
-#define QED_RDMA_MAX_MR_SIZE (0x10000000000ULL)
-#define QED_RDMA_MAX_CQS (RDMA_MAX_CQS)
-#define QED_RDMA_MAX_MRS (RDMA_MAX_TIDS)
-/* Add 1 for header element */
-#define QED_RDMA_MAX_SRQ_ELEM_PER_WQE (RDMA_MAX_SGE_PER_RQ_WQE + 1)
-#define QED_RDMA_MAX_SGE_PER_SRQ_WQE (RDMA_MAX_SGE_PER_RQ_WQE)
-#define QED_RDMA_SRQ_WQE_ELEM_SIZE (16)
-#define QED_RDMA_MAX_SRQS (32 * 1024)
-
-#define QED_RDMA_MAX_CQE_32_BIT (0x7FFFFFFF - 1)
-#define QED_RDMA_MAX_CQE_16_BIT (0x7FFF - 1)
-
-enum qed_rdma_toggle_bit {
- QED_RDMA_TOGGLE_BIT_CLEAR = 0,
- QED_RDMA_TOGGLE_BIT_SET = 1
-};
-
-#define QED_RDMA_MAX_BMAP_NAME (10)
-struct qed_bmap {
- unsigned long *bitmap;
- u32 max_count;
- char name[QED_RDMA_MAX_BMAP_NAME];
-};
-
-struct qed_rdma_info {
- /* spin lock to protect bitmaps */
- spinlock_t lock;
-
- struct qed_bmap cq_map;
- struct qed_bmap pd_map;
- struct qed_bmap tid_map;
- struct qed_bmap qp_map;
- struct qed_bmap srq_map;
- struct qed_bmap cid_map;
- struct qed_bmap real_cid_map;
- struct qed_bmap dpi_map;
- struct qed_bmap toggle_bits;
- struct qed_rdma_events events;
- struct qed_rdma_device *dev;
- struct qed_rdma_port *port;
- u32 last_tid;
- u8 num_cnqs;
- u32 num_qps;
- u32 num_mrs;
- u16 queue_zone_base;
- u16 max_queue_zones;
- enum protocol_type proto;
-};
-
-struct qed_rdma_qp {
- struct regpair qp_handle;
- struct regpair qp_handle_async;
- u32 qpid;
- u16 icid;
- enum qed_roce_qp_state cur_state;
- bool use_srq;
- bool signal_all;
- bool fmr_and_reserved_lkey;
-
- bool incoming_rdma_read_en;
- bool incoming_rdma_write_en;
- bool incoming_atomic_en;
- bool e2e_flow_control_en;
-
- u16 pd;
- u16 pkey;
- u32 dest_qp;
- u16 mtu;
- u16 srq_id;
- u8 traffic_class_tos;
- u8 hop_limit_ttl;
- u16 dpi;
- u32 flow_label;
- bool lb_indication;
- u16 vlan_id;
- u32 ack_timeout;
- u8 retry_cnt;
- u8 rnr_retry_cnt;
- u8 min_rnr_nak_timer;
- bool sqd_async;
- union qed_gid sgid;
- union qed_gid dgid;
- enum roce_mode roce_mode;
- u16 udp_src_port;
- u8 stats_queue;
-
- /* requester */
- u8 max_rd_atomic_req;
- u32 sq_psn;
- u16 sq_cq_id;
- u16 sq_num_pages;
- dma_addr_t sq_pbl_ptr;
- void *orq;
- dma_addr_t orq_phys_addr;
- u8 orq_num_pages;
- bool req_offloaded;
+#if IS_ENABLED(CONFIG_QED_RDMA)
+void qed_roce_dpm_dcbx(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+#else
+static inline void qed_roce_dpm_dcbx(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt) {}
+#endif
- /* responder */
- u8 max_rd_atomic_resp;
- u32 rq_psn;
- u16 rq_cq_id;
- u16 rq_num_pages;
- dma_addr_t rq_pbl_ptr;
- void *irq;
- dma_addr_t irq_phys_addr;
- u8 irq_num_pages;
- bool resp_offloaded;
- u32 cq_prod;
+int qed_roce_setup(struct qed_hwfn *p_hwfn);
+void qed_roce_stop(struct qed_hwfn *p_hwfn);
+int qed_roce_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+int qed_roce_alloc_cid(struct qed_hwfn *p_hwfn, u16 *cid);
+int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp);
- u8 remote_mac_addr[6];
- u8 local_mac_addr[6];
+int qed_roce_query_qp(struct qed_hwfn *p_hwfn,
+ struct qed_rdma_qp *qp,
+ struct qed_rdma_query_qp_out_params *out_params);
- void *shared_queue;
- dma_addr_t shared_queue_phys_addr;
-};
+int qed_roce_modify_qp(struct qed_hwfn *p_hwfn,
+ struct qed_rdma_qp *qp,
+ enum qed_roce_qp_state prev_state,
+ struct qed_rdma_modify_qp_in_params *params);
-#if IS_ENABLED(CONFIG_QED_RDMA)
-void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
-void qed_roce_async_event(struct qed_hwfn *p_hwfn,
- u8 fw_event_code, union rdma_eqe_data *rdma_data);
-void qed_ll2b_complete_tx_gsi_packet(struct qed_hwfn *p_hwfn,
- u8 connection_handle,
- void *cookie,
- dma_addr_t first_frag_addr,
- bool b_last_fragment, bool b_last_packet);
-void qed_ll2b_release_tx_gsi_packet(struct qed_hwfn *p_hwfn,
- u8 connection_handle,
- void *cookie,
- dma_addr_t first_frag_addr,
- bool b_last_fragment, bool b_last_packet);
-void qed_ll2b_complete_rx_gsi_packet(struct qed_hwfn *p_hwfn,
- u8 connection_handle,
- void *cookie,
- dma_addr_t rx_buf_addr,
- u16 data_length,
- u8 data_length_error,
- u16 parse_flags,
- u16 vlan,
- u32 src_mac_addr_hi,
- u16 src_mac_addr_lo, bool b_last_packet);
-#else
-static inline void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) {}
-static inline void qed_roce_async_event(struct qed_hwfn *p_hwfn,
- u8 fw_event_code,
- union rdma_eqe_data *rdma_data) {}
-static inline void qed_ll2b_complete_tx_gsi_packet(struct qed_hwfn *p_hwfn,
- u8 connection_handle,
- void *cookie,
- dma_addr_t first_frag_addr,
- bool b_last_fragment,
- bool b_last_packet) {}
-static inline void qed_ll2b_release_tx_gsi_packet(struct qed_hwfn *p_hwfn,
- u8 connection_handle,
- void *cookie,
- dma_addr_t first_frag_addr,
- bool b_last_fragment,
- bool b_last_packet) {}
-static inline void qed_ll2b_complete_rx_gsi_packet(struct qed_hwfn *p_hwfn,
- u8 connection_handle,
- void *cookie,
- dma_addr_t rx_buf_addr,
- u16 data_length,
- u8 data_length_error,
- u16 parse_flags,
- u16 vlan,
- u32 src_mac_addr_hi,
- u16 src_mac_addr_lo,
- bool b_last_packet) {}
-#endif
#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h
index 3357bbefa445..ab4ad8a1e2a5 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h
@@ -104,12 +104,17 @@ union ramrod_data {
struct roce_query_qp_req_ramrod_data roce_query_qp_req;
struct roce_destroy_qp_resp_ramrod_data roce_destroy_qp_resp;
struct roce_destroy_qp_req_ramrod_data roce_destroy_qp_req;
+ struct roce_init_func_ramrod_data roce_init_func;
struct rdma_create_cq_ramrod_data rdma_create_cq;
struct rdma_destroy_cq_ramrod_data rdma_destroy_cq;
struct rdma_srq_create_ramrod_data rdma_create_srq;
struct rdma_srq_destroy_ramrod_data rdma_destroy_srq;
struct rdma_srq_modify_ramrod_data rdma_modify_srq;
- struct roce_init_func_ramrod_data roce_init_func;
+ struct iwarp_create_qp_ramrod_data iwarp_create_qp;
+ struct iwarp_tcp_offload_ramrod_data iwarp_tcp_offload;
+ struct iwarp_mpa_offload_ramrod_data iwarp_mpa_offload;
+ struct iwarp_modify_qp_ramrod_data iwarp_modify_qp;
+ struct iwarp_init_func_ramrod_data iwarp_init_func;
struct fcoe_init_ramrod_params fcoe_init;
struct fcoe_conn_offload_ramrod_params fcoe_conn_ofld;
struct fcoe_conn_terminate_ramrod_params fcoe_conn_terminate;
@@ -120,6 +125,7 @@ union ramrod_data {
struct iscsi_spe_func_dstry iscsi_destroy;
struct iscsi_spe_conn_offload iscsi_conn_offload;
struct iscsi_conn_update_ramrod_params iscsi_conn_update;
+ struct iscsi_spe_conn_mac_update iscsi_conn_mac_update;
struct iscsi_spe_conn_termination iscsi_conn_terminate;
struct vf_start_ramrod_data vf_start;
@@ -173,6 +179,22 @@ struct qed_consq {
struct qed_chain chain;
};
+typedef int
+(*qed_spq_async_comp_cb)(struct qed_hwfn *p_hwfn,
+ u8 opcode,
+ u16 echo,
+ union event_ring_data *data,
+ u8 fw_return_code);
+
+int
+qed_spq_register_async_cb(struct qed_hwfn *p_hwfn,
+ enum protocol_type protocol_id,
+ qed_spq_async_comp_cb cb);
+
+void
+qed_spq_unregister_async_cb(struct qed_hwfn *p_hwfn,
+ enum protocol_type protocol_id);
+
struct qed_spq {
spinlock_t lock; /* SPQ lock */
@@ -202,6 +224,7 @@ struct qed_spq {
u32 comp_count;
u32 cid;
+ qed_spq_async_comp_cb async_comp_cb[MAX_PROTOCOL_TYPE];
};
/**
@@ -270,28 +293,23 @@ void qed_spq_return_entry(struct qed_hwfn *p_hwfn,
* @param p_hwfn
* @param num_elem number of elements in the eq
*
- * @return struct qed_eq* - a newly allocated structure; NULL upon error.
+ * @return int
*/
-struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn,
- u16 num_elem);
+int qed_eq_alloc(struct qed_hwfn *p_hwfn, u16 num_elem);
/**
- * @brief qed_eq_setup - Reset the SPQ to its start state.
+ * @brief qed_eq_setup - Reset the EQ to its start state.
*
* @param p_hwfn
- * @param p_eq
*/
-void qed_eq_setup(struct qed_hwfn *p_hwfn,
- struct qed_eq *p_eq);
+void qed_eq_setup(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_eq_deallocate - deallocates the given EQ struct.
+ * @brief qed_eq_free - deallocates the given EQ struct.
*
* @param p_hwfn
- * @param p_eq
*/
-void qed_eq_free(struct qed_hwfn *p_hwfn,
- struct qed_eq *p_eq);
+void qed_eq_free(struct qed_hwfn *p_hwfn);
/**
* @brief qed_eq_prod_update - update the FW with default EQ producer
@@ -342,28 +360,23 @@ u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn);
*
* @param p_hwfn
*
- * @return struct qed_eq* - a newly allocated structure; NULL upon error.
+ * @return int
*/
-struct qed_consq *qed_consq_alloc(struct qed_hwfn *p_hwfn);
+int qed_consq_alloc(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_consq_setup - Reset the ConsQ to its start
- * state.
+ * @brief qed_consq_setup - Reset the ConsQ to its start state.
*
* @param p_hwfn
- * @param p_eq
*/
-void qed_consq_setup(struct qed_hwfn *p_hwfn,
- struct qed_consq *p_consq);
+void qed_consq_setup(struct qed_hwfn *p_hwfn);
/**
* @brief qed_consq_free - deallocates the given ConsQ struct.
*
* @param p_hwfn
- * @param p_eq
*/
-void qed_consq_free(struct qed_hwfn *p_hwfn,
- struct qed_consq *p_consq);
+void qed_consq_free(struct qed_hwfn *p_hwfn);
/**
* @file
@@ -401,6 +414,7 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
* to the internal RAM of the UStorm by the Function Start Ramrod.
*
* @param p_hwfn
+ * @param p_ptt
* @param p_tunn
* @param mode
* @param allow_npar_tx_switch
@@ -409,6 +423,7 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
*/
int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
struct qed_tunnel_info *p_tunn,
enum qed_mf_mode mode, bool allow_npar_tx_switch);
@@ -426,6 +441,15 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
int qed_sp_pf_update(struct qed_hwfn *p_hwfn);
/**
+ * @brief qed_sp_pf_update_stag - Update firmware of new outer tag
+ *
+ * @param p_hwfn
+ *
+ * @return int
+ */
+int qed_sp_pf_update_stag(struct qed_hwfn *p_hwfn);
+
+/**
* @brief qed_sp_pf_stop - PF Function Stop Ramrod
*
* This ramrod is sent to close a Physical Function (PF). It is the last ramrod
@@ -442,6 +466,7 @@ int qed_sp_pf_update(struct qed_hwfn *p_hwfn);
int qed_sp_pf_stop(struct qed_hwfn *p_hwfn);
int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
struct qed_tunnel_info *p_tunn,
enum spq_mode comp_mode,
struct qed_spq_comp_cb *p_comp_data);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
index bc3694e91b85..46d0c3cb83a5 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
@@ -185,22 +185,20 @@ static void qed_set_tunn_ports(struct qed_tunnel_info *p_tun,
}
static void
-__qed_set_ramrod_tunnel_param(u8 *p_tunn_cls, u8 *p_enable_tx_clas,
+__qed_set_ramrod_tunnel_param(u8 *p_tunn_cls,
struct qed_tunn_update_type *tun_type)
{
*p_tunn_cls = tun_type->tun_cls;
-
- if (tun_type->b_mode_enabled)
- *p_enable_tx_clas = 1;
}
static void
-qed_set_ramrod_tunnel_param(u8 *p_tunn_cls, u8 *p_enable_tx_clas,
+qed_set_ramrod_tunnel_param(u8 *p_tunn_cls,
struct qed_tunn_update_type *tun_type,
- u8 *p_update_port, __le16 *p_port,
+ u8 *p_update_port,
+ __le16 *p_port,
struct qed_tunn_update_udp_port *p_udp_port)
{
- __qed_set_ramrod_tunnel_param(p_tunn_cls, p_enable_tx_clas, tun_type);
+ __qed_set_ramrod_tunnel_param(p_tunn_cls, tun_type);
if (p_udp_port->b_update_port) {
*p_update_port = 1;
*p_port = cpu_to_le16(p_udp_port->port);
@@ -219,33 +217,27 @@ qed_tunn_set_pf_update_params(struct qed_hwfn *p_hwfn,
qed_set_tunn_ports(p_tun, p_src);
qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_vxlan,
- &p_tunn_cfg->tx_enable_vxlan,
&p_tun->vxlan,
&p_tunn_cfg->set_vxlan_udp_port_flg,
&p_tunn_cfg->vxlan_udp_port,
&p_tun->vxlan_port);
qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2geneve,
- &p_tunn_cfg->tx_enable_l2geneve,
&p_tun->l2_geneve,
&p_tunn_cfg->set_geneve_udp_port_flg,
&p_tunn_cfg->geneve_udp_port,
&p_tun->geneve_port);
__qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgeneve,
- &p_tunn_cfg->tx_enable_ipgeneve,
&p_tun->ip_geneve);
__qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2gre,
- &p_tunn_cfg->tx_enable_l2gre,
&p_tun->l2_gre);
__qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgre,
- &p_tunn_cfg->tx_enable_ipgre,
&p_tun->ip_gre);
p_tunn_cfg->update_rx_pf_clss = p_tun->b_update_rx_cls;
- p_tunn_cfg->update_tx_pf_clss = p_tun->b_update_tx_cls;
}
static void qed_set_hw_tunn_mode(struct qed_hwfn *p_hwfn,
@@ -261,17 +253,18 @@ static void qed_set_hw_tunn_mode(struct qed_hwfn *p_hwfn,
}
static void qed_set_hw_tunn_mode_port(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
struct qed_tunnel_info *p_tunn)
{
if (p_tunn->vxlan_port.b_update_port)
- qed_set_vxlan_dest_port(p_hwfn, p_hwfn->p_main_ptt,
+ qed_set_vxlan_dest_port(p_hwfn, p_ptt,
p_tunn->vxlan_port.port);
if (p_tunn->geneve_port.b_update_port)
- qed_set_geneve_dest_port(p_hwfn, p_hwfn->p_main_ptt,
+ qed_set_geneve_dest_port(p_hwfn, p_ptt,
p_tunn->geneve_port.port);
- qed_set_hw_tunn_mode(p_hwfn, p_hwfn->p_main_ptt, p_tunn);
+ qed_set_hw_tunn_mode(p_hwfn, p_ptt, p_tunn);
}
static void
@@ -289,33 +282,29 @@ qed_tunn_set_pf_start_params(struct qed_hwfn *p_hwfn,
qed_set_tunn_ports(p_tun, p_src);
qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_vxlan,
- &p_tunn_cfg->tx_enable_vxlan,
&p_tun->vxlan,
&p_tunn_cfg->set_vxlan_udp_port_flg,
&p_tunn_cfg->vxlan_udp_port,
&p_tun->vxlan_port);
qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2geneve,
- &p_tunn_cfg->tx_enable_l2geneve,
&p_tun->l2_geneve,
&p_tunn_cfg->set_geneve_udp_port_flg,
&p_tunn_cfg->geneve_udp_port,
&p_tun->geneve_port);
__qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgeneve,
- &p_tunn_cfg->tx_enable_ipgeneve,
&p_tun->ip_geneve);
__qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2gre,
- &p_tunn_cfg->tx_enable_l2gre,
&p_tun->l2_gre);
__qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgre,
- &p_tunn_cfg->tx_enable_ipgre,
&p_tun->ip_gre);
}
int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
struct qed_tunnel_info *p_tunn,
enum qed_mf_mode mode, bool allow_npar_tx_switch)
{
@@ -412,7 +401,8 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
rc = qed_spq_post(p_hwfn, p_ent, NULL);
if (p_tunn)
- qed_set_hw_tunn_mode_port(p_hwfn, &p_hwfn->cdev->tunnel);
+ qed_set_hw_tunn_mode_port(p_hwfn, p_ptt,
+ &p_hwfn->cdev->tunnel);
return rc;
}
@@ -443,6 +433,7 @@ int qed_sp_pf_update(struct qed_hwfn *p_hwfn)
/* Set pf update ramrod command params */
int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
struct qed_tunnel_info *p_tunn,
enum spq_mode comp_mode,
struct qed_spq_comp_cb *p_comp_data)
@@ -477,7 +468,7 @@ int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn,
if (rc)
return rc;
- qed_set_hw_tunn_mode_port(p_hwfn, &p_hwfn->cdev->tunnel);
+ qed_set_hw_tunn_mode_port(p_hwfn, p_ptt, &p_hwfn->cdev->tunnel);
return rc;
}
@@ -523,3 +514,27 @@ int qed_sp_heartbeat_ramrod(struct qed_hwfn *p_hwfn)
return qed_spq_post(p_hwfn, p_ent, NULL);
}
+
+int qed_sp_pf_update_stag(struct qed_hwfn *p_hwfn)
+{
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ int rc = -EINVAL;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qed_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_CB;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
+ &init_data);
+ if (rc)
+ return rc;
+
+ p_ent->ramrod.pf_update.update_mf_vlan_flag = true;
+ p_ent->ramrod.pf_update.mf_vlan = cpu_to_le16(p_hwfn->hw_info.ovlan);
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
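/* Sketch of the intended call site (hypothetical): an MFW notification
 * that the outer S-tag changed would first update
 * p_hwfn->hw_info.ovlan and then post this ramrod so firmware switches
 * to the new tag.
 */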
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
index f6423a139ca0..be48d9abd001 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -54,7 +54,7 @@
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"
-#include "qed_roce.h"
+#include "qed_rdma.h"
/***************************************************************************
* Structures & Definitions
@@ -302,32 +302,16 @@ static int
qed_async_event_completion(struct qed_hwfn *p_hwfn,
struct event_ring_entry *p_eqe)
{
- switch (p_eqe->protocol_id) {
-#if IS_ENABLED(CONFIG_QED_RDMA)
- case PROTOCOLID_ROCE:
- qed_roce_async_event(p_hwfn, p_eqe->opcode,
- &p_eqe->data.rdma_data);
- return 0;
-#endif
- case PROTOCOLID_COMMON:
- return qed_sriov_eqe_event(p_hwfn,
- p_eqe->opcode,
- p_eqe->echo, &p_eqe->data);
- case PROTOCOLID_ISCSI:
- if (!IS_ENABLED(CONFIG_QED_ISCSI))
- return -EINVAL;
+ qed_spq_async_comp_cb cb;
- if (p_hwfn->p_iscsi_info->event_cb) {
- struct qed_iscsi_info *p_iscsi = p_hwfn->p_iscsi_info;
+ if (!p_hwfn->p_spq || (p_eqe->protocol_id >= MAX_PROTOCOL_TYPE))
+ return -EINVAL;
- return p_iscsi->event_cb(p_iscsi->event_context,
- p_eqe->opcode, &p_eqe->data);
- } else {
- DP_NOTICE(p_hwfn,
- "iSCSI async completion is not set\n");
- return -EINVAL;
- }
- default:
+ cb = p_hwfn->p_spq->async_comp_cb[p_eqe->protocol_id];
+ if (cb) {
+ return cb(p_hwfn, p_eqe->opcode, p_eqe->echo,
+ &p_eqe->data, p_eqe->fw_return_code);
+ } else {
DP_NOTICE(p_hwfn,
"Unknown Async completion for protocol: %d\n",
p_eqe->protocol_id);
@@ -335,6 +319,28 @@ qed_async_event_completion(struct qed_hwfn *p_hwfn,
}
}
+int
+qed_spq_register_async_cb(struct qed_hwfn *p_hwfn,
+ enum protocol_type protocol_id,
+ qed_spq_async_comp_cb cb)
+{
+ if (!p_hwfn->p_spq || (protocol_id >= MAX_PROTOCOL_TYPE))
+ return -EINVAL;
+
+ p_hwfn->p_spq->async_comp_cb[protocol_id] = cb;
+ return 0;
+}
+
+void
+qed_spq_unregister_async_cb(struct qed_hwfn *p_hwfn,
+ enum protocol_type protocol_id)
+{
+ if (!p_hwfn->p_spq || (protocol_id >= MAX_PROTOCOL_TYPE))
+ return;
+
+ p_hwfn->p_spq->async_comp_cb[protocol_id] = NULL;
+}
+
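/* Usage sketch for the registration API above (the handler body is
 * illustrative): each protocol registers one callback at setup time
 * and removes it at teardown, replacing the old hardcoded switch in
 * qed_async_event_completion().
 */
static int example_async_event(struct qed_hwfn *p_hwfn, u8 opcode, u16 echo,
			       union event_ring_data *data, u8 fw_return_code)
{
	return 0;	/* consume the EQE */
}

/* setup:    qed_spq_register_async_cb(p_hwfn, PROTOCOLID_ROCE,
 *                                     example_async_event);
 * teardown: qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_ROCE);
 */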
/***************************************************************************
* EQ API
***************************************************************************/
@@ -403,14 +409,14 @@ int qed_eq_completion(struct qed_hwfn *p_hwfn, void *cookie)
return rc;
}
-struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn, u16 num_elem)
+int qed_eq_alloc(struct qed_hwfn *p_hwfn, u16 num_elem)
{
struct qed_eq *p_eq;
/* Allocate EQ struct */
p_eq = kzalloc(sizeof(*p_eq), GFP_KERNEL);
if (!p_eq)
- return NULL;
+ return -ENOMEM;
/* Allocate and initialize EQ chain */
if (qed_chain_alloc(p_hwfn->cdev,
@@ -419,31 +425,35 @@ struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn, u16 num_elem)
QED_CHAIN_CNT_TYPE_U16,
num_elem,
sizeof(union event_ring_element),
- &p_eq->chain))
+ &p_eq->chain, NULL))
goto eq_allocate_fail;
/* register EQ completion on the SP SB */
qed_int_register_cb(p_hwfn, qed_eq_completion,
p_eq, &p_eq->eq_sb_index, &p_eq->p_fw_cons);
- return p_eq;
+ p_hwfn->p_eq = p_eq;
+ return 0;
eq_allocate_fail:
- qed_eq_free(p_hwfn, p_eq);
- return NULL;
+ kfree(p_eq);
+ return -ENOMEM;
}
-void qed_eq_setup(struct qed_hwfn *p_hwfn, struct qed_eq *p_eq)
+void qed_eq_setup(struct qed_hwfn *p_hwfn)
{
- qed_chain_reset(&p_eq->chain);
+ qed_chain_reset(&p_hwfn->p_eq->chain);
}
-void qed_eq_free(struct qed_hwfn *p_hwfn, struct qed_eq *p_eq)
+void qed_eq_free(struct qed_hwfn *p_hwfn)
{
- if (!p_eq)
+ if (!p_hwfn->p_eq)
return;
- qed_chain_free(p_hwfn->cdev, &p_eq->chain);
- kfree(p_eq);
+
+ qed_chain_free(p_hwfn->cdev, &p_hwfn->p_eq->chain);
+
+ kfree(p_hwfn->p_eq);
+ p_hwfn->p_eq = NULL;
}
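/* Lifecycle sketch under the new int-returning API (error handling
 * elided): the EQ now hangs off p_hwfn instead of being threaded
 * through every call.
 *
 *   rc = qed_eq_alloc(p_hwfn, num_elem);  // was: p_eq = qed_eq_alloc()
 *   qed_eq_setup(p_hwfn);                 // was: qed_eq_setup(p_hwfn, p_eq)
 *   qed_eq_free(p_hwfn);                  // safe even if alloc never ran
 */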
/***************************************************************************
@@ -543,7 +553,7 @@ int qed_spq_alloc(struct qed_hwfn *p_hwfn)
QED_CHAIN_CNT_TYPE_U16,
0, /* N/A when the mode is SINGLE */
sizeof(struct slow_path_element),
- &p_spq->chain))
+ &p_spq->chain, NULL))
goto spq_allocate_fail;
/* allocate and fill the SPQ elements (incl. ramrod data list) */
@@ -583,8 +593,8 @@ void qed_spq_free(struct qed_hwfn *p_hwfn)
}
qed_chain_free(p_hwfn->cdev, &p_spq->chain);
- ;
kfree(p_spq);
+ p_hwfn->p_spq = NULL;
}
int qed_spq_get_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry **pp_ent)
@@ -934,14 +944,14 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn,
return rc;
}
-struct qed_consq *qed_consq_alloc(struct qed_hwfn *p_hwfn)
+int qed_consq_alloc(struct qed_hwfn *p_hwfn)
{
struct qed_consq *p_consq;
/* Allocate ConsQ struct */
p_consq = kzalloc(sizeof(*p_consq), GFP_KERNEL);
if (!p_consq)
- return NULL;
+ return -ENOMEM;
/* Allocate and initialize ConsQ chain */
if (qed_chain_alloc(p_hwfn->cdev,
@@ -949,25 +959,29 @@ struct qed_consq *qed_consq_alloc(struct qed_hwfn *p_hwfn)
QED_CHAIN_MODE_PBL,
QED_CHAIN_CNT_TYPE_U16,
QED_CHAIN_PAGE_SIZE / 0x80,
- 0x80, &p_consq->chain))
+ 0x80, &p_consq->chain, NULL))
goto consq_allocate_fail;
- return p_consq;
+ p_hwfn->p_consq = p_consq;
+ return 0;
consq_allocate_fail:
- qed_consq_free(p_hwfn, p_consq);
- return NULL;
+ kfree(p_consq);
+ return -ENOMEM;
}
-void qed_consq_setup(struct qed_hwfn *p_hwfn, struct qed_consq *p_consq)
+void qed_consq_setup(struct qed_hwfn *p_hwfn)
{
- qed_chain_reset(&p_consq->chain);
+ qed_chain_reset(&p_hwfn->p_consq->chain);
}
-void qed_consq_free(struct qed_hwfn *p_hwfn, struct qed_consq *p_consq)
+void qed_consq_free(struct qed_hwfn *p_hwfn)
{
- if (!p_consq)
+ if (!p_hwfn->p_consq)
return;
- qed_chain_free(p_hwfn->cdev, &p_consq->chain);
- kfree(p_consq);
+
+ qed_chain_free(p_hwfn->cdev, &p_hwfn->p_consq->chain);
+
+ kfree(p_hwfn->p_consq);
+ p_hwfn->p_consq = NULL;
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
index f5ed54d611ec..2cfd3bd9a031 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -44,6 +44,26 @@
#include "qed_sp.h"
#include "qed_sriov.h"
#include "qed_vf.h"
+static int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
+ u8 opcode,
+ __le16 echo,
+ union event_ring_data *data, u8 fw_return_code);
+
+static u8 qed_vf_calculate_legacy(struct qed_vf_info *p_vf)
+{
+ u8 legacy = 0;
+
+ if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
+ ETH_HSI_VER_NO_PKT_LEN_TUNN)
+ legacy |= QED_QCID_LEGACY_VF_RX_PROD;
+
+ if (!(p_vf->acquire.vfdev_info.capabilities &
+ VFPF_ACQUIRE_CAP_QUEUE_QIDS))
+ legacy |= QED_QCID_LEGACY_VF_CID;
+
+ return legacy;
+}
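/* Worked example for qed_vf_calculate_legacy() above: a VF whose
 * fastpath HSI minor equals ETH_HSI_VER_NO_PKT_LEN_TUNN and which did
 * not advertise VFPF_ACQUIRE_CAP_QUEUE_QIDS gets both
 * QED_QCID_LEGACY_VF_RX_PROD and QED_QCID_LEGACY_VF_CID set; a fully
 * current VF yields 0.
 */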
/* IOV ramrods */
static int qed_sp_vf_start(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf)
@@ -178,6 +198,19 @@ static struct qed_vf_info *qed_iov_get_vf_info(struct qed_hwfn *p_hwfn,
return vf;
}
+static struct qed_queue_cid *
+qed_iov_get_vf_rx_queue_cid(struct qed_vf_queue *p_queue)
+{
+ int i;
+
+ for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
+ if (p_queue->cids[i].p_cid && !p_queue->cids[i].b_is_tx)
+ return p_queue->cids[i].p_cid;
+ }
+
+ return NULL;
+}
+
enum qed_iov_validate_q_mode {
QED_IOV_VALIDATE_Q_NA,
QED_IOV_VALIDATE_Q_ENABLE,
@@ -190,12 +223,24 @@ static bool qed_iov_validate_queue_mode(struct qed_hwfn *p_hwfn,
enum qed_iov_validate_q_mode mode,
bool b_is_tx)
{
+ int i;
+
if (mode == QED_IOV_VALIDATE_Q_NA)
return true;
- if ((b_is_tx && p_vf->vf_queues[qid].p_tx_cid) ||
- (!b_is_tx && p_vf->vf_queues[qid].p_rx_cid))
+ for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
+ struct qed_vf_queue_cid *p_qcid;
+
+ p_qcid = &p_vf->vf_queues[qid].cids[i];
+
+ if (!p_qcid->p_cid)
+ continue;
+
+ if (p_qcid->b_is_tx != b_is_tx)
+ continue;
+
return mode == QED_IOV_VALIDATE_Q_ENABLE;
+ }
/* If no valid cid was found, the queue is disabled */
return mode == QED_IOV_VALIDATE_Q_DISABLE;
@@ -378,33 +423,6 @@ static int qed_iov_pci_cfg_info(struct qed_dev *cdev)
return 0;
}
-static void qed_iov_clear_vf_igu_blocks(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt)
-{
- struct qed_igu_block *p_sb;
- u16 sb_id;
- u32 val;
-
- if (!p_hwfn->hw_info.p_igu_info) {
- DP_ERR(p_hwfn,
- "qed_iov_clear_vf_igu_blocks IGU Info not initialized\n");
- return;
- }
-
- for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev);
- sb_id++) {
- p_sb = &p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks[sb_id];
- if ((p_sb->status & QED_IGU_STATUS_FREE) &&
- !(p_sb->status & QED_IGU_STATUS_PF)) {
- val = qed_rd(p_hwfn, p_ptt,
- IGU_REG_MAPPING_MEMORY + sb_id * 4);
- SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
- qed_wr(p_hwfn, p_ptt,
- IGU_REG_MAPPING_MEMORY + 4 * sb_id, val);
- }
- }
-}
-
static void qed_iov_setup_vfdb(struct qed_hwfn *p_hwfn)
{
struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
@@ -552,20 +570,24 @@ int qed_iov_alloc(struct qed_hwfn *p_hwfn)
p_hwfn->pf_iov_info = p_sriov;
+ qed_spq_register_async_cb(p_hwfn, PROTOCOLID_COMMON,
+ qed_sriov_eqe_event);
+
return qed_iov_allocate_vfdb(p_hwfn);
}
-void qed_iov_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+void qed_iov_setup(struct qed_hwfn *p_hwfn)
{
if (!IS_PF_SRIOV(p_hwfn) || !IS_PF_SRIOV_ALLOC(p_hwfn))
return;
qed_iov_setup_vfdb(p_hwfn);
- qed_iov_clear_vf_igu_blocks(p_hwfn, p_ptt);
}
void qed_iov_free(struct qed_hwfn *p_hwfn)
{
+ qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_COMMON);
+
if (IS_PF_SRIOV_ALLOC(p_hwfn)) {
qed_iov_free_vfdb(p_hwfn);
kfree(p_hwfn->pf_iov_info);
@@ -747,6 +769,35 @@ static void qed_iov_vf_igu_set_int(struct qed_hwfn *p_hwfn,
qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
}
+static int
+qed_iov_enable_vf_access_msix(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u8 abs_vf_id, u8 num_sbs)
+{
+ u8 current_max = 0;
+ int i;
+
+ /* From AH onward, configuration is per-PF. Find the maximum SB
+ * count over all currently enabled child VFs and use it as the
+ * baseline.
+ */
+ if (!QED_IS_BB(p_hwfn->cdev)) {
+ qed_for_each_vf(p_hwfn, i) {
+ struct qed_vf_info *p_vf;
+
+ p_vf = qed_iov_get_vf_info(p_hwfn, (u16)i, true);
+ if (!p_vf)
+ continue;
+
+ current_max = max_t(u8, current_max, p_vf->num_sbs);
+ }
+ }
+
+ if (num_sbs > current_max)
+ return qed_mcp_config_vf_msix(p_hwfn, p_ptt,
+ abs_vf_id, num_sbs);
+
+ return 0;
+}
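
A worked example of the per-PF sizing rule described above: the MCP is reconfigured only when the request exceeds what any enabled sibling VF already uses. A stand-alone sketch with invented numbers:

#include <stdio.h>

/* If the enabled sibling VFs use {4, 8, 2} SBs, a new request for 6
 * is already covered by the configured maximum (8) and is a no-op;
 * a request for 10 would trigger an MCP reconfiguration.
 */
int main(void)
{
	unsigned char sibling_sbs[] = { 4, 8, 2 };
	unsigned char current_max = 0, num_sbs = 6;
	unsigned int i;

	for (i = 0; i < sizeof(sibling_sbs); i++)
		if (sibling_sbs[i] > current_max)
			current_max = sibling_sbs[i];

	printf(num_sbs > current_max ? "reconfigure MCP\n" : "no-op\n");
	return 0;
}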
+
static int qed_iov_enable_vf_access(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_vf_info *vf)
@@ -771,7 +822,8 @@ static int qed_iov_enable_vf_access(struct qed_hwfn *p_hwfn,
qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf);
- rc = qed_mcp_config_vf_msix(p_hwfn, p_ptt, vf->abs_vf_id, vf->num_sbs);
+ rc = qed_iov_enable_vf_access_msix(p_hwfn, p_ptt,
+ vf->abs_vf_id, vf->num_sbs);
if (rc)
return rc;
@@ -838,45 +890,36 @@ static u8 qed_iov_alloc_vf_igu_sbs(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_vf_info *vf, u16 num_rx_queues)
{
- struct qed_igu_block *igu_blocks;
- int qid = 0, igu_id = 0;
+ struct qed_igu_block *p_block;
+ struct cau_sb_entry sb_entry;
+ int qid = 0;
u32 val = 0;
- igu_blocks = p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks;
-
- if (num_rx_queues > p_hwfn->hw_info.p_igu_info->free_blks)
- num_rx_queues = p_hwfn->hw_info.p_igu_info->free_blks;
- p_hwfn->hw_info.p_igu_info->free_blks -= num_rx_queues;
+ if (num_rx_queues > p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov)
+ num_rx_queues = p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov;
+ p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov -= num_rx_queues;
SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, vf->abs_vf_id);
SET_FIELD(val, IGU_MAPPING_LINE_VALID, 1);
SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, 0);
- while ((qid < num_rx_queues) &&
- (igu_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev))) {
- if (igu_blocks[igu_id].status & QED_IGU_STATUS_FREE) {
- struct cau_sb_entry sb_entry;
-
- vf->igu_sbs[qid] = (u16)igu_id;
- igu_blocks[igu_id].status &= ~QED_IGU_STATUS_FREE;
-
- SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid);
-
- qed_wr(p_hwfn, p_ptt,
- IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id,
- val);
-
- /* Configure igu sb in CAU which were marked valid */
- qed_init_cau_sb_entry(p_hwfn, &sb_entry,
- p_hwfn->rel_pf_id,
- vf->abs_vf_id, 1);
- qed_dmae_host2grc(p_hwfn, p_ptt,
- (u64)(uintptr_t)&sb_entry,
- CAU_REG_SB_VAR_MEMORY +
- igu_id * sizeof(u64), 2, 0);
- qid++;
- }
- igu_id++;
+ for (qid = 0; qid < num_rx_queues; qid++) {
+ p_block = qed_get_igu_free_sb(p_hwfn, false);
+ vf->igu_sbs[qid] = p_block->igu_sb_id;
+ p_block->status &= ~QED_IGU_STATUS_FREE;
+ SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid);
+
+ qed_wr(p_hwfn, p_ptt,
+ IGU_REG_MAPPING_MEMORY +
+ sizeof(u32) * p_block->igu_sb_id, val);
+
+ /* Configure igu sb in CAU which were marked valid */
+ qed_init_cau_sb_entry(p_hwfn, &sb_entry,
+ p_hwfn->rel_pf_id, vf->abs_vf_id, 1);
+ qed_dmae_host2grc(p_hwfn, p_ptt,
+ (u64)(uintptr_t)&sb_entry,
+ CAU_REG_SB_VAR_MEMORY +
+ p_block->igu_sb_id * sizeof(u64), 2, 0);
}
vf->num_sbs = (u8) num_rx_queues;
@@ -901,10 +944,8 @@ static void qed_iov_free_vf_igu_sbs(struct qed_hwfn *p_hwfn,
SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
qed_wr(p_hwfn, p_ptt, addr, val);
- p_info->igu_map.igu_blocks[igu_id].status |=
- QED_IGU_STATUS_FREE;
-
- p_hwfn->hw_info.p_igu_info->free_blks++;
+ p_info->entry[igu_id].status |= QED_IGU_STATUS_FREE;
+ p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov++;
}
vf->num_sbs = 0;
@@ -1028,20 +1069,15 @@ static int qed_iov_init_hw_for_vf(struct qed_hwfn *p_hwfn,
vf->num_txqs = num_of_vf_avaiable_chains;
for (i = 0; i < vf->num_rxqs; i++) {
- struct qed_vf_q_info *p_queue = &vf->vf_queues[i];
+ struct qed_vf_queue *p_queue = &vf->vf_queues[i];
p_queue->fw_rx_qid = p_params->req_rx_queue[i];
p_queue->fw_tx_qid = p_params->req_tx_queue[i];
- /* CIDs are per-VF, so no problem having them 0-based. */
- p_queue->fw_cid = i;
-
DP_VERBOSE(p_hwfn, QED_MSG_IOV,
- "VF[%d] - Q[%d] SB %04x, qid [Rx %04x Tx %04x] CID %04x\n",
- vf->relative_vf_id,
- i, vf->igu_sbs[i],
- p_queue->fw_rx_qid,
- p_queue->fw_tx_qid, p_queue->fw_cid);
+ "VF[%d] - Q[%d] SB %04x, qid [Rx %04x Tx %04x]\n",
+ vf->relative_vf_id, i, vf->igu_sbs[i],
+ p_queue->fw_rx_qid, p_queue->fw_tx_qid);
}
/* Update the link configuration in bulletin */
@@ -1328,7 +1364,7 @@ static void qed_iov_clean_vf(struct qed_hwfn *p_hwfn, u8 vfid)
static void qed_iov_vf_cleanup(struct qed_hwfn *p_hwfn,
struct qed_vf_info *p_vf)
{
- u32 i;
+ u32 i, j;
p_vf->vf_bulletin = 0;
p_vf->vport_instance = 0;
@@ -1341,16 +1377,15 @@ static void qed_iov_vf_cleanup(struct qed_hwfn *p_hwfn,
p_vf->num_active_rxqs = 0;
for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++) {
- struct qed_vf_q_info *p_queue = &p_vf->vf_queues[i];
+ struct qed_vf_queue *p_queue = &p_vf->vf_queues[i];
- if (p_queue->p_rx_cid) {
- qed_eth_queue_cid_release(p_hwfn, p_queue->p_rx_cid);
- p_queue->p_rx_cid = NULL;
- }
+ for (j = 0; j < MAX_QUEUES_PER_QZONE; j++) {
+ if (!p_queue->cids[j].p_cid)
+ continue;
- if (p_queue->p_tx_cid) {
- qed_eth_queue_cid_release(p_hwfn, p_queue->p_tx_cid);
- p_queue->p_tx_cid = NULL;
+ qed_eth_queue_cid_release(p_hwfn,
+ p_queue->cids[j].p_cid);
+ p_queue->cids[j].p_cid = NULL;
}
}
@@ -1359,13 +1394,67 @@ static void qed_iov_vf_cleanup(struct qed_hwfn *p_hwfn,
qed_iov_clean_vf(p_hwfn, p_vf->relative_vf_id);
}
+/* Returns either 0, or log2(size) of the doorbell bar, in bytes */
+static u32 qed_iov_vf_db_bar_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ u32 val = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_VF_BAR1_SIZE);
+
+ if (val)
+ return val + 11;
+ return 0;
+}
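
The log-size encoding is easier to see with numbers; a stand-alone sketch, assuming the register semantics implied by the helper above (a non-zero value v encodes a bar of 2^(v + 11) bytes, i.e. v counts 2 KB granules):

#include <stdio.h>

/* Hypothetical decoder for the PGLUE_B_REG_VF_BAR1_SIZE encoding. */
static unsigned int db_bar_bytes(unsigned int reg_val)
{
	return reg_val ? 1u << (reg_val + 11) : 0;
}

int main(void)
{
	/* reg_val = 2 -> 2^(2 + 11) = 8192-byte (8 KB) doorbell bar */
	printf("%u\n", db_bar_bytes(2));
	return 0;
}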
+
+static void
+qed_iov_vf_mbx_acquire_resc_cids(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *p_vf,
+ struct vf_pf_resc_request *p_req,
+ struct pf_vf_resc *p_resp)
+{
+ u8 num_vf_cons = p_hwfn->pf_params.eth_pf_params.num_vf_cons;
+ u8 db_size = qed_db_addr_vf(1, DQ_DEMS_LEGACY) -
+ qed_db_addr_vf(0, DQ_DEMS_LEGACY);
+ u32 bar_size;
+
+ p_resp->num_cids = min_t(u8, p_req->num_cids, num_vf_cons);
+
+ /* If VF didn't bother asking for QIDs then don't bother limiting
+ * number of CIDs. The VF doesn't care about the number, and this
+ * has the likely result of causing an additional acquisition.
+ */
+ if (!(p_vf->acquire.vfdev_info.capabilities &
+ VFPF_ACQUIRE_CAP_QUEUE_QIDS))
+ return;
+
+ /* If doorbell bar was mapped by VF, limit the VF CIDs to an amount
+ * that would make sure doorbells for all CIDs fall within the bar.
+ * If it doesn't, make sure regview window is sufficient.
+ */
+ if (p_vf->acquire.vfdev_info.capabilities &
+ VFPF_ACQUIRE_CAP_PHYSICAL_BAR) {
+ bar_size = qed_iov_vf_db_bar_size(p_hwfn, p_ptt);
+ if (bar_size)
+ bar_size = 1 << bar_size;
+
+ if (p_hwfn->cdev->num_hwfns > 1)
+ bar_size /= 2;
+ } else {
+ bar_size = PXP_VF_BAR0_DQ_LENGTH;
+ }
+
+ if (bar_size / db_size < 256)
+ p_resp->num_cids = min_t(u8, p_resp->num_cids,
+ (u8)(bar_size / db_size));
+}
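
A worked example of the capacity check above; the bar size and doorbell stride below are invented for illustration, not values read from hardware:

#include <stdio.h>

/* With a per-CID doorbell stride 'db_size' and a doorbell bar of
 * 'bar_size' bytes (halved on dual-hwfn devices), the PF caps the
 * VF's CIDs so that every doorbell falls inside the bar.
 */
int main(void)
{
	unsigned int bar_size = 1u << 15;   /* hypothetical 32 KB bar */
	unsigned int db_size = 256;         /* hypothetical stride    */
	unsigned int max_cids = bar_size / db_size;

	if (max_cids < 256)
		printf("cap VF CIDs at %u\n", max_cids); /* prints 128 */
	return 0;
}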
+
static u8 qed_iov_vf_mbx_acquire_resc(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_vf_info *p_vf,
struct vf_pf_resc_request *p_req,
struct pf_vf_resc *p_resp)
{
- int i;
+ u8 i;
/* Queue related information */
p_resp->num_rxqs = p_vf->num_rxqs;
@@ -1383,7 +1472,7 @@ static u8 qed_iov_vf_mbx_acquire_resc(struct qed_hwfn *p_hwfn,
for (i = 0; i < p_resp->num_rxqs; i++) {
qed_fw_l2_queue(p_hwfn, p_vf->vf_queues[i].fw_rx_qid,
(u16 *)&p_resp->hw_qid[i]);
- p_resp->cid[i] = p_vf->vf_queues[i].fw_cid;
+ p_resp->cid[i] = i;
}
/* Filter related information */
@@ -1392,6 +1481,8 @@ static u8 qed_iov_vf_mbx_acquire_resc(struct qed_hwfn *p_hwfn,
p_resp->num_vlan_filters = min_t(u8, p_vf->num_vlan_filters,
p_req->num_vlan_filters);
+ qed_iov_vf_mbx_acquire_resc_cids(p_hwfn, p_ptt, p_vf, p_req, p_resp);
+
/* This isn't really needed/enforced, but some legacy VFs might depend
* on the correct filling of this field.
*/
@@ -1403,10 +1494,11 @@ static u8 qed_iov_vf_mbx_acquire_resc(struct qed_hwfn *p_hwfn,
p_resp->num_sbs < p_req->num_sbs ||
p_resp->num_mac_filters < p_req->num_mac_filters ||
p_resp->num_vlan_filters < p_req->num_vlan_filters ||
- p_resp->num_mc_filters < p_req->num_mc_filters) {
+ p_resp->num_mc_filters < p_req->num_mc_filters ||
+ p_resp->num_cids < p_req->num_cids) {
DP_VERBOSE(p_hwfn,
QED_MSG_IOV,
- "VF[%d] - Insufficient resources: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x]\n",
+ "VF[%d] - Insufficient resources: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x] cids [%02x/%02x]\n",
p_vf->abs_vf_id,
p_req->num_rxqs,
p_resp->num_rxqs,
@@ -1418,7 +1510,9 @@ static u8 qed_iov_vf_mbx_acquire_resc(struct qed_hwfn *p_hwfn,
p_resp->num_mac_filters,
p_req->num_vlan_filters,
p_resp->num_vlan_filters,
- p_req->num_mc_filters, p_resp->num_mc_filters);
+ p_req->num_mc_filters,
+ p_resp->num_mc_filters,
+ p_req->num_cids, p_resp->num_cids);
/* Some legacy OSes are incapable of correctly handling this
* failure.
@@ -1534,6 +1628,15 @@ static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
if (p_hwfn->cdev->num_hwfns > 1)
pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_100G;
+ /* Share our ability to use multiple queue-ids only with VFs
+ * that request it.
+ */
+ if (req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_QUEUE_QIDS)
+ pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_QUEUE_QIDS;
+
+ /* Share the sizes of the bars with VF */
+ resp->pfdev_info.bar_size = qed_iov_vf_db_bar_size(p_hwfn, p_ptt);
+
qed_iov_vf_mbx_acquire_stats(p_hwfn, &pfdev_info->stats_info);
memcpy(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr, ETH_ALEN);
@@ -1758,9 +1861,11 @@ static int qed_iov_configure_vport_forced(struct qed_hwfn *p_hwfn,
/* Update all the Rx queues */
for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++) {
- struct qed_queue_cid *p_cid;
+ struct qed_vf_queue *p_queue = &p_vf->vf_queues[i];
+ struct qed_queue_cid *p_cid = NULL;
- p_cid = p_vf->vf_queues[i].p_rx_cid;
+ /* There can be at most 1 Rx queue on qzone. Find it */
+ p_cid = qed_iov_get_vf_rx_queue_cid(p_queue);
if (!p_cid)
continue;
@@ -1951,16 +2056,55 @@ static void qed_iov_vf_mbx_start_rxq_resp(struct qed_hwfn *p_hwfn,
qed_iov_send_response(p_hwfn, p_ptt, vf, length, status);
}
+static u8 qed_iov_vf_mbx_qid(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *p_vf, bool b_is_tx)
+{
+ struct qed_iov_vf_mbx *p_mbx = &p_vf->vf_mbx;
+ struct vfpf_qid_tlv *p_qid_tlv;
+
+ /* Search for the qid if the VF indicated it's going to provide it */
+ if (!(p_vf->acquire.vfdev_info.capabilities &
+ VFPF_ACQUIRE_CAP_QUEUE_QIDS)) {
+ if (b_is_tx)
+ return QED_IOV_LEGACY_QID_TX;
+ else
+ return QED_IOV_LEGACY_QID_RX;
+ }
+
+ p_qid_tlv = (struct vfpf_qid_tlv *)
+ qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
+ CHANNEL_TLV_QID);
+ if (!p_qid_tlv) {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "VF[%2x]: Failed to provide qid\n",
+ p_vf->relative_vf_id);
+
+ return QED_IOV_QID_INVALID;
+ }
+
+ if (p_qid_tlv->qid >= MAX_QUEUES_PER_QZONE) {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "VF[%02x]: Provided qid out-of-bounds %02x\n",
+ p_vf->relative_vf_id, p_qid_tlv->qid);
+ return QED_IOV_QID_INVALID;
+ }
+
+ return p_qid_tlv->qid;
+}
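
A stand-alone model of the qid resolution rule this helper implements: legacy VFs get a fixed slot per direction inside the queue-zone, while QUEUE_QIDS-capable VFs name the slot explicitly via the CHANNEL_TLV_QID TLV. Constants and names below are illustrative:

#include <stdio.h>
#include <stdbool.h>

#define LEGACY_QID_RX 0
#define LEGACY_QID_TX 1

/* Hypothetical model of qed_iov_vf_mbx_qid(); the real helper also
 * rejects a missing TLV or an out-of-bounds qid as invalid.
 */
static int resolve_qid(bool vf_has_qids_cap, bool is_tx, int tlv_qid)
{
	if (!vf_has_qids_cap)
		return is_tx ? LEGACY_QID_TX : LEGACY_QID_RX;
	return tlv_qid; /* caller bounds-checks vs. MAX_QUEUES_PER_QZONE */
}

int main(void)
{
	printf("%d %d\n", resolve_qid(false, true, -1),
	       resolve_qid(true, false, 3)); /* prints "1 3" */
	return 0;
}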
+
static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_vf_info *vf)
{
struct qed_queue_start_common_params params;
+ struct qed_queue_cid_vf_params vf_params;
struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
u8 status = PFVF_STATUS_NO_RESOURCE;
- struct qed_vf_q_info *p_queue;
+ u8 qid_usage_idx, vf_legacy = 0;
struct vfpf_start_rxq_tlv *req;
- bool b_legacy_vf = false;
+ struct qed_vf_queue *p_queue;
+ struct qed_queue_cid *p_cid;
+ struct qed_sb_info sb_dummy;
int rc;
req = &mbx->req_virt->start_rxq;
@@ -1970,53 +2114,64 @@ static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn,
!qed_iov_validate_sb(p_hwfn, vf, req->hw_sb))
goto out;
- /* Acquire a new queue-cid */
+ qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, false);
+ if (qid_usage_idx == QED_IOV_QID_INVALID)
+ goto out;
+
p_queue = &vf->vf_queues[req->rx_qid];
+ if (p_queue->cids[qid_usage_idx].p_cid)
+ goto out;
+
+ vf_legacy = qed_vf_calculate_legacy(vf);
+ /* Acquire a new queue-cid */
memset(&params, 0, sizeof(params));
params.queue_id = p_queue->fw_rx_qid;
params.vport_id = vf->vport_id;
params.stats_id = vf->abs_vf_id + 0x10;
- params.sb = req->hw_sb;
+ /* Since IGU index is passed via sb_info, construct a dummy one */
+ memset(&sb_dummy, 0, sizeof(sb_dummy));
+ sb_dummy.igu_sb_id = req->hw_sb;
+ params.p_sb = &sb_dummy;
params.sb_idx = req->sb_index;
- p_queue->p_rx_cid = _qed_eth_queue_to_cid(p_hwfn,
- vf->opaque_fid,
- p_queue->fw_cid,
- req->rx_qid, &params);
- if (!p_queue->p_rx_cid)
+ memset(&vf_params, 0, sizeof(vf_params));
+ vf_params.vfid = vf->relative_vf_id;
+ vf_params.vf_qid = (u8)req->rx_qid;
+ vf_params.vf_legacy = vf_legacy;
+ vf_params.qid_usage_idx = qid_usage_idx;
+ p_cid = qed_eth_queue_to_cid(p_hwfn, vf->opaque_fid,
+ &params, true, &vf_params);
+ if (!p_cid)
goto out;
/* Legacy VFs have their Producers in a different location, which they
* calculate on their own and clean the producer prior to this.
*/
- if (vf->acquire.vfdev_info.eth_fp_hsi_minor ==
- ETH_HSI_VER_NO_PKT_LEN_TUNN) {
- b_legacy_vf = true;
- } else {
+ if (!(vf_legacy & QED_QCID_LEGACY_VF_RX_PROD))
REG_WR(p_hwfn,
GTT_BAR0_MAP_REG_MSDM_RAM +
MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id, req->rx_qid),
0);
- }
- p_queue->p_rx_cid->b_legacy_vf = b_legacy_vf;
- rc = qed_eth_rxq_start_ramrod(p_hwfn,
- p_queue->p_rx_cid,
+ rc = qed_eth_rxq_start_ramrod(p_hwfn, p_cid,
req->bd_max_bytes,
req->rxq_addr,
req->cqe_pbl_addr, req->cqe_pbl_size);
if (rc) {
status = PFVF_STATUS_FAILURE;
- qed_eth_queue_cid_release(p_hwfn, p_queue->p_rx_cid);
- p_queue->p_rx_cid = NULL;
+ qed_eth_queue_cid_release(p_hwfn, p_cid);
} else {
+ p_queue->cids[qid_usage_idx].p_cid = p_cid;
+ p_queue->cids[qid_usage_idx].b_is_tx = false;
status = PFVF_STATUS_SUCCESS;
vf->num_active_rxqs++;
}
out:
- qed_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status, b_legacy_vf);
+ qed_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status,
+ !!(vf_legacy &
+ QED_QCID_LEGACY_VF_RX_PROD));
}
static void
@@ -2209,7 +2364,7 @@ static void qed_iov_vf_mbx_update_tunn_param(struct qed_hwfn *p_hwfn,
if (b_update_required) {
u16 geneve_port;
- rc = qed_sp_pf_update_tunn_cfg(p_hwfn, &tunn,
+ rc = qed_sp_pf_update_tunn_cfg(p_hwfn, p_ptt, &tunn,
QED_SPQ_MODE_EBLOCK, NULL);
if (rc)
status = PFVF_STATUS_FAILURE;
@@ -2235,7 +2390,8 @@ send_resp:
static void qed_iov_vf_mbx_start_txq_resp(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
- struct qed_vf_info *p_vf, u8 status)
+ struct qed_vf_info *p_vf,
+ u32 cid, u8 status)
{
struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
struct pfvf_start_queue_resp_tlv *p_tlv;
@@ -2263,12 +2419,8 @@ static void qed_iov_vf_mbx_start_txq_resp(struct qed_hwfn *p_hwfn,
sizeof(struct channel_list_end_tlv));
/* Update the TLV with the response */
- if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) {
- u16 qid = mbx->req_virt->start_txq.tx_qid;
-
- p_tlv->offset = qed_db_addr_vf(p_vf->vf_queues[qid].fw_cid,
- DQ_DEMS_LEGACY);
- }
+ if ((status == PFVF_STATUS_SUCCESS) && !b_legacy)
+ p_tlv->offset = qed_db_addr_vf(cid, DQ_DEMS_LEGACY);
qed_iov_send_response(p_hwfn, p_ptt, p_vf, length, status);
}
@@ -2278,10 +2430,15 @@ static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn,
struct qed_vf_info *vf)
{
struct qed_queue_start_common_params params;
+ struct qed_queue_cid_vf_params vf_params;
struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
u8 status = PFVF_STATUS_NO_RESOURCE;
struct vfpf_start_txq_tlv *req;
- struct qed_vf_q_info *p_queue;
+ struct qed_vf_queue *p_queue;
+ struct qed_queue_cid *p_cid;
+ struct qed_sb_info sb_dummy;
+ u8 qid_usage_idx, vf_legacy;
+ u32 cid = 0;
int rc;
u16 pq;
@@ -2289,89 +2446,126 @@ static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn,
req = &mbx->req_virt->start_txq;
if (!qed_iov_validate_txq(p_hwfn, vf, req->tx_qid,
- QED_IOV_VALIDATE_Q_DISABLE) ||
+ QED_IOV_VALIDATE_Q_NA) ||
!qed_iov_validate_sb(p_hwfn, vf, req->hw_sb))
goto out;
- /* Acquire a new queue-cid */
+ qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, true);
+ if (qid_usage_idx == QED_IOV_QID_INVALID)
+ goto out;
+
p_queue = &vf->vf_queues[req->tx_qid];
+ if (p_queue->cids[qid_usage_idx].p_cid)
+ goto out;
+
+ vf_legacy = qed_vf_calculate_legacy(vf);
+ /* Acquire a new queue-cid */
params.queue_id = p_queue->fw_tx_qid;
params.vport_id = vf->vport_id;
params.stats_id = vf->abs_vf_id + 0x10;
- params.sb = req->hw_sb;
+
+ /* Since IGU index is passed via sb_info, construct a dummy one */
+ memset(&sb_dummy, 0, sizeof(sb_dummy));
+ sb_dummy.igu_sb_id = req->hw_sb;
+ params.p_sb = &sb_dummy;
params.sb_idx = req->sb_index;
- p_queue->p_tx_cid = _qed_eth_queue_to_cid(p_hwfn,
- vf->opaque_fid,
- p_queue->fw_cid,
- req->tx_qid, &params);
- if (!p_queue->p_tx_cid)
+ memset(&vf_params, 0, sizeof(vf_params));
+ vf_params.vfid = vf->relative_vf_id;
+ vf_params.vf_qid = (u8)req->tx_qid;
+ vf_params.vf_legacy = vf_legacy;
+ vf_params.qid_usage_idx = qid_usage_idx;
+
+ p_cid = qed_eth_queue_to_cid(p_hwfn, vf->opaque_fid,
+ &params, false, &vf_params);
+ if (!p_cid)
goto out;
pq = qed_get_cm_pq_idx_vf(p_hwfn, vf->relative_vf_id);
- rc = qed_eth_txq_start_ramrod(p_hwfn, p_queue->p_tx_cid,
+ rc = qed_eth_txq_start_ramrod(p_hwfn, p_cid,
req->pbl_addr, req->pbl_size, pq);
if (rc) {
status = PFVF_STATUS_FAILURE;
- qed_eth_queue_cid_release(p_hwfn, p_queue->p_tx_cid);
- p_queue->p_tx_cid = NULL;
+ qed_eth_queue_cid_release(p_hwfn, p_cid);
} else {
status = PFVF_STATUS_SUCCESS;
+ p_queue->cids[qid_usage_idx].p_cid = p_cid;
+ p_queue->cids[qid_usage_idx].b_is_tx = true;
+ cid = p_cid->cid;
}
out:
- qed_iov_vf_mbx_start_txq_resp(p_hwfn, p_ptt, vf, status);
+ qed_iov_vf_mbx_start_txq_resp(p_hwfn, p_ptt, vf, cid, status);
}
static int qed_iov_vf_stop_rxqs(struct qed_hwfn *p_hwfn,
struct qed_vf_info *vf,
- u16 rxq_id, bool cqe_completion)
+ u16 rxq_id,
+ u8 qid_usage_idx, bool cqe_completion)
{
- struct qed_vf_q_info *p_queue;
+ struct qed_vf_queue *p_queue;
int rc = 0;
- if (!qed_iov_validate_rxq(p_hwfn, vf, rxq_id,
- QED_IOV_VALIDATE_Q_ENABLE)) {
+ if (!qed_iov_validate_rxq(p_hwfn, vf, rxq_id, QED_IOV_VALIDATE_Q_NA)) {
DP_VERBOSE(p_hwfn,
QED_MSG_IOV,
- "VF[%d] Tried Closing Rx 0x%04x which is inactive\n",
- vf->relative_vf_id, rxq_id);
+ "VF[%d] Tried Closing Rx 0x%04x.%02x which is inactive\n",
+ vf->relative_vf_id, rxq_id, qid_usage_idx);
return -EINVAL;
}
p_queue = &vf->vf_queues[rxq_id];
+ /* We've validated the index and the existence of the active RXQ -
+ * now we need to make sure that it's using the correct qid.
+ */
+ if (!p_queue->cids[qid_usage_idx].p_cid ||
+ p_queue->cids[qid_usage_idx].b_is_tx) {
+ struct qed_queue_cid *p_cid;
+
+ p_cid = qed_iov_get_vf_rx_queue_cid(p_queue);
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "VF[%d] - Tried Closing Rx 0x%04x.%02x, but Rx is at %04x.%02x\n",
+ vf->relative_vf_id,
+ rxq_id, qid_usage_idx, rxq_id, p_cid->qid_usage_idx);
+ return -EINVAL;
+ }
+
+ /* Now that we know we have a valid Rx-queue - close it */
rc = qed_eth_rx_queue_stop(p_hwfn,
- p_queue->p_rx_cid,
+ p_queue->cids[qid_usage_idx].p_cid,
false, cqe_completion);
if (rc)
return rc;
- p_queue->p_rx_cid = NULL;
+ p_queue->cids[qid_usage_idx].p_cid = NULL;
vf->num_active_rxqs--;
return 0;
}
static int qed_iov_vf_stop_txqs(struct qed_hwfn *p_hwfn,
- struct qed_vf_info *vf, u16 txq_id)
+ struct qed_vf_info *vf,
+ u16 txq_id, u8 qid_usage_idx)
{
- struct qed_vf_q_info *p_queue;
+ struct qed_vf_queue *p_queue;
int rc = 0;
- if (!qed_iov_validate_txq(p_hwfn, vf, txq_id,
- QED_IOV_VALIDATE_Q_ENABLE))
+ if (!qed_iov_validate_txq(p_hwfn, vf, txq_id, QED_IOV_VALIDATE_Q_NA))
return -EINVAL;
p_queue = &vf->vf_queues[txq_id];
+ if (!p_queue->cids[qid_usage_idx].p_cid ||
+ !p_queue->cids[qid_usage_idx].b_is_tx)
+ return -EINVAL;
- rc = qed_eth_tx_queue_stop(p_hwfn, p_queue->p_tx_cid);
+ rc = qed_eth_tx_queue_stop(p_hwfn, p_queue->cids[qid_usage_idx].p_cid);
if (rc)
return rc;
- p_queue->p_tx_cid = NULL;
-
+ p_queue->cids[qid_usage_idx].p_cid = NULL;
return 0;
}
@@ -2383,6 +2577,7 @@ static void qed_iov_vf_mbx_stop_rxqs(struct qed_hwfn *p_hwfn,
struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
u8 status = PFVF_STATUS_FAILURE;
struct vfpf_stop_rxqs_tlv *req;
+ u8 qid_usage_idx;
int rc;
/* There has never been an official driver that used this interface
@@ -2398,8 +2593,13 @@ static void qed_iov_vf_mbx_stop_rxqs(struct qed_hwfn *p_hwfn,
goto out;
}
+ /* Find which qid-index is associated with the queue */
+ qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, false);
+ if (qid_usage_idx == QED_IOV_QID_INVALID)
+ goto out;
+
rc = qed_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid,
- req->cqe_completion);
+ qid_usage_idx, req->cqe_completion);
if (!rc)
status = PFVF_STATUS_SUCCESS;
out:
@@ -2415,6 +2615,7 @@ static void qed_iov_vf_mbx_stop_txqs(struct qed_hwfn *p_hwfn,
struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
u8 status = PFVF_STATUS_FAILURE;
struct vfpf_stop_txqs_tlv *req;
+ u8 qid_usage_idx;
int rc;
/* There has never been an official driver that used this interface
@@ -2429,7 +2630,13 @@ static void qed_iov_vf_mbx_stop_txqs(struct qed_hwfn *p_hwfn,
status = PFVF_STATUS_NOT_SUPPORTED;
goto out;
}
- rc = qed_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid);
+
+ /* Find which qid-index is associated with the queue */
+ qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, true);
+ if (qid_usage_idx == QED_IOV_QID_INVALID)
+ goto out;
+
+ rc = qed_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid, qid_usage_idx);
if (!rc)
status = PFVF_STATUS_SUCCESS;
@@ -2449,7 +2656,7 @@ static void qed_iov_vf_mbx_update_rxqs(struct qed_hwfn *p_hwfn,
u8 status = PFVF_STATUS_FAILURE;
u8 complete_event_flg;
u8 complete_cqe_flg;
- u16 qid;
+ u8 qid_usage_idx;
int rc;
u8 i;
@@ -2457,19 +2664,42 @@ static void qed_iov_vf_mbx_update_rxqs(struct qed_hwfn *p_hwfn,
complete_cqe_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_CQE_FLAG);
complete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG);
- /* Validate inputs */
- for (i = req->rx_qid; i < req->rx_qid + req->num_rxqs; i++)
+ qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, false);
+ if (qid_usage_idx == QED_IOV_QID_INVALID)
+ goto out;
+
+ /* There shouldn't exist a VF that uses queue-qids yet uses this
+ * API with multiple Rx queues. Validate this.
+ */
+ if ((vf->acquire.vfdev_info.capabilities &
+ VFPF_ACQUIRE_CAP_QUEUE_QIDS) && req->num_rxqs != 1) {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "VF[%d] supports QIDs but sends multiple queues\n",
+ vf->relative_vf_id);
+ goto out;
+ }
+
+ /* Validate inputs - for the legacy case this is still true since
+ * qid_usage_idx for each Rx queue would be LEGACY_QID_RX.
+ */
+ for (i = req->rx_qid; i < req->rx_qid + req->num_rxqs; i++) {
if (!qed_iov_validate_rxq(p_hwfn, vf, i,
- QED_IOV_VALIDATE_Q_ENABLE)) {
- DP_INFO(p_hwfn, "VF[%d]: Incorrect Rxqs [%04x, %02x]\n",
- vf->relative_vf_id, req->rx_qid, req->num_rxqs);
+ QED_IOV_VALIDATE_Q_NA) ||
+ !vf->vf_queues[i].cids[qid_usage_idx].p_cid ||
+ vf->vf_queues[i].cids[qid_usage_idx].b_is_tx) {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "VF[%d]: Incorrect Rxqs [%04x, %02x]\n",
+ vf->relative_vf_id, req->rx_qid,
+ req->num_rxqs);
goto out;
}
+ }
/* Prepare the handlers */
for (i = 0; i < req->num_rxqs; i++) {
- qid = req->rx_qid + i;
- handlers[i] = vf->vf_queues[qid].p_rx_cid;
+ u16 qid = req->rx_qid + i;
+
+ handlers[i] = vf->vf_queues[qid].cids[qid_usage_idx].p_cid;
}
rc = qed_sp_eth_rx_queues_update(p_hwfn, (void **)&handlers,
@@ -2683,6 +2913,8 @@ qed_iov_vp_update_rss_param(struct qed_hwfn *p_hwfn,
(1 << p_rss_tlv->rss_table_size_log));
for (i = 0; i < table_size; i++) {
+ struct qed_queue_cid *p_cid;
+
q_idx = p_rss_tlv->rss_ind_table[i];
if (!qed_iov_validate_rxq(p_hwfn, vf, q_idx,
QED_IOV_VALIDATE_Q_ENABLE)) {
@@ -2694,7 +2926,8 @@ qed_iov_vp_update_rss_param(struct qed_hwfn *p_hwfn,
goto out;
}
- p_rss->rss_ind_table[i] = vf->vf_queues[q_idx].p_rx_cid;
+ p_cid = qed_iov_get_vf_rx_queue_cid(&vf->vf_queues[q_idx]);
+ p_rss->rss_ind_table[i] = p_cid;
}
p_data->rss_params = p_rss;
@@ -3610,8 +3843,10 @@ static void qed_sriov_vfpf_malicious(struct qed_hwfn *p_hwfn,
}
}
-int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
- u8 opcode, __le16 echo, union event_ring_data *data)
+static int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
+ u8 opcode,
+ __le16 echo,
+ union event_ring_data *data, u8 fw_return_code)
{
switch (opcode) {
case COMMON_EVENT_VF_PF_CHANNEL:
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.h b/drivers/net/ethernet/qlogic/qed/qed_sriov.h
index 81a497ce6585..c2e44bce398c 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.h
@@ -149,12 +149,21 @@ struct qed_iov_vf_mbx {
struct vfpf_first_tlv first_tlv;
};
-struct qed_vf_q_info {
+#define QED_IOV_LEGACY_QID_RX (0)
+#define QED_IOV_LEGACY_QID_TX (1)
+#define QED_IOV_QID_INVALID (0xFE)
+
+struct qed_vf_queue_cid {
+ bool b_is_tx;
+ struct qed_queue_cid *p_cid;
+};
+
+/* Describes a qzone associated with the VF */
+struct qed_vf_queue {
u16 fw_rx_qid;
- struct qed_queue_cid *p_rx_cid;
u16 fw_tx_qid;
- struct qed_queue_cid *p_tx_cid;
- u8 fw_cid;
+
+ struct qed_vf_queue_cid cids[MAX_QUEUES_PER_QZONE];
};
enum vf_state {
@@ -212,7 +221,8 @@ struct qed_vf_info {
u8 num_mac_filters;
u8 num_vlan_filters;
- struct qed_vf_q_info vf_queues[QED_MAX_VF_CHAINS_PER_PF];
+
+ struct qed_vf_queue vf_queues[QED_MAX_VF_CHAINS_PER_PF];
u16 igu_sbs[QED_MAX_VF_CHAINS_PER_PF];
u8 num_active_rxqs;
struct qed_public_vf_info p_vf_info;
@@ -316,9 +326,8 @@ int qed_iov_alloc(struct qed_hwfn *p_hwfn);
* @brief qed_iov_setup - setup sriov related resources
*
* @param p_hwfn
- * @param p_ptt
*/
-void qed_iov_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+void qed_iov_setup(struct qed_hwfn *p_hwfn);
/**
* @brief qed_iov_free - free sriov related resources
@@ -335,17 +344,6 @@ void qed_iov_free(struct qed_hwfn *p_hwfn);
void qed_iov_free_hw_info(struct qed_dev *cdev);
/**
- * @brief qed_sriov_eqe_event - handle async sriov event arrived on eqe.
- *
- * @param p_hwfn
- * @param opcode
- * @param echo
- * @param data
- */
-int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
- u8 opcode, __le16 echo, union event_ring_data *data);
-
-/**
* @brief Mark structs of vfs that have been FLR-ed.
*
* @param p_hwfn
@@ -397,7 +395,7 @@ static inline int qed_iov_alloc(struct qed_hwfn *p_hwfn)
return 0;
}
-static inline void qed_iov_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+static inline void qed_iov_setup(struct qed_hwfn *p_hwfn)
{
}
@@ -409,13 +407,6 @@ static inline void qed_iov_free_hw_info(struct qed_dev *cdev)
{
}
-static inline int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
- u8 opcode,
- __le16 echo, union event_ring_data *data)
-{
- return -EINVAL;
-}
-
static inline bool qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn,
u32 *disabled_vfs)
{
diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c
index 11d71e5eea14..1926d1ed439f 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_vf.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c
@@ -153,6 +153,77 @@ static int qed_send_msg2pf(struct qed_hwfn *p_hwfn, u8 *done, u32 resp_size)
return rc;
}
+static void qed_vf_pf_add_qid(struct qed_hwfn *p_hwfn,
+ struct qed_queue_cid *p_cid)
+{
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct vfpf_qid_tlv *p_qid_tlv;
+
+ /* Only add QIDs for the queue if it was negotiated with PF */
+ if (!(p_iov->acquire_resp.pfdev_info.capabilities &
+ PFVF_ACQUIRE_CAP_QUEUE_QIDS))
+ return;
+
+ p_qid_tlv = qed_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_QID, sizeof(*p_qid_tlv));
+ p_qid_tlv->qid = p_cid->qid_usage_idx;
+}
+
+int _qed_vf_pf_release(struct qed_hwfn *p_hwfn, bool b_final)
+{
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_def_resp_tlv *resp;
+ struct vfpf_first_tlv *req;
+ u32 size;
+ int rc;
+
+ /* clear mailbox and prep first tlv */
+ req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_RELEASE, sizeof(*req));
+
+ /* add list termination tlv */
+ qed_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
+
+ resp = &p_iov->pf2vf_reply->default_resp;
+ rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+
+ if (!rc && resp->hdr.status != PFVF_STATUS_SUCCESS)
+ rc = -EAGAIN;
+
+ qed_vf_pf_req_end(p_hwfn, rc);
+ if (!b_final)
+ return rc;
+
+ p_hwfn->b_int_enabled = 0;
+
+ if (p_iov->vf2pf_request)
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(union vfpf_tlvs),
+ p_iov->vf2pf_request,
+ p_iov->vf2pf_request_phys);
+ if (p_iov->pf2vf_reply)
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(union pfvf_tlvs),
+ p_iov->pf2vf_reply, p_iov->pf2vf_reply_phys);
+
+ if (p_iov->bulletin.p_virt) {
+ size = sizeof(struct qed_bulletin_content);
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ size,
+ p_iov->bulletin.p_virt, p_iov->bulletin.phys);
+ }
+
+ kfree(p_hwfn->vf_iov_info);
+ p_hwfn->vf_iov_info = NULL;
+
+ return rc;
+}
+
+int qed_vf_pf_release(struct qed_hwfn *p_hwfn)
+{
+ return _qed_vf_pf_release(p_hwfn, true);
+}
+
#define VF_ACQUIRE_THRESH 3
static void qed_vf_pf_acquire_reduce_resc(struct qed_hwfn *p_hwfn,
struct vf_pf_resc_request *p_req,
@@ -160,7 +231,7 @@ static void qed_vf_pf_acquire_reduce_resc(struct qed_hwfn *p_hwfn,
{
DP_VERBOSE(p_hwfn,
QED_MSG_IOV,
- "PF unwilling to fullill resource request: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x]. Try PF recommended amount\n",
+ "PF unwilling to fullill resource request: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x] cids [%02x/%02x]. Try PF recommended amount\n",
p_req->num_rxqs,
p_resp->num_rxqs,
p_req->num_rxqs,
@@ -171,7 +242,8 @@ static void qed_vf_pf_acquire_reduce_resc(struct qed_hwfn *p_hwfn,
p_resp->num_mac_filters,
p_req->num_vlan_filters,
p_resp->num_vlan_filters,
- p_req->num_mc_filters, p_resp->num_mc_filters);
+ p_req->num_mc_filters,
+ p_resp->num_mc_filters, p_req->num_cids, p_resp->num_cids);
/* humble our request */
p_req->num_txqs = p_resp->num_txqs;
@@ -180,6 +252,7 @@ static void qed_vf_pf_acquire_reduce_resc(struct qed_hwfn *p_hwfn,
p_req->num_mac_filters = p_resp->num_mac_filters;
p_req->num_vlan_filters = p_resp->num_vlan_filters;
p_req->num_mc_filters = p_resp->num_mc_filters;
+ p_req->num_cids = p_resp->num_cids;
}
static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
@@ -204,6 +277,7 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
p_resc->num_sbs = QED_MAX_VF_CHAINS_PER_PF;
p_resc->num_mac_filters = QED_ETH_VF_NUM_MAC_FILTERS;
p_resc->num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS;
+ p_resc->num_cids = QED_ETH_VF_DEFAULT_NUM_CIDS;
req->vfdev_info.os_type = VFPF_ACQUIRE_OS_LINUX;
req->vfdev_info.fw_major = FW_MAJOR_VERSION;
@@ -216,6 +290,13 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
/* Fill capability field with any non-deprecated config we support */
req->vfdev_info.capabilities |= VFPF_ACQUIRE_CAP_100G;
+ /* If we've mapped the doorbell bar, try using queue qids */
+ if (p_iov->b_doorbell_bar) {
+ req->vfdev_info.capabilities |= VFPF_ACQUIRE_CAP_PHYSICAL_BAR |
+ VFPF_ACQUIRE_CAP_QUEUE_QIDS;
+ p_resc->num_cids = QED_ETH_VF_MAX_NUM_CIDS;
+ }
+
/* pf 2 vf bulletin board address */
req->bulletin_addr = p_iov->bulletin.phys;
req->bulletin_size = p_iov->bulletin.size;
@@ -307,6 +388,13 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
if (req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_PRE_FP_HSI)
p_iov->b_pre_fp_hsi = true;
+ /* In case PF doesn't support multi-queue Tx, update the number of
+ * CIDs to reflect the number of queues [older PFs didn't fill that
+ * field].
+ */
+ if (!(resp->pfdev_info.capabilities & PFVF_ACQUIRE_CAP_QUEUE_QIDS))
+ resp->resc.num_cids = resp->resc.num_rxqs + resp->resc.num_txqs;
+
/* Update bulletin board size with response from PF */
p_iov->bulletin.size = resp->bulletin_size;
@@ -338,10 +426,27 @@ exit:
return rc;
}
+u32 qed_vf_hw_bar_size(struct qed_hwfn *p_hwfn, enum BAR_ID bar_id)
+{
+ u32 bar_size;
+
+ /* Regview size is fixed */
+ if (bar_id == BAR_ID_0)
+ return 1 << 17;
+
+ /* Doorbell is received from PF */
+ bar_size = p_hwfn->vf_iov_info->acquire_resp.pfdev_info.bar_size;
+ if (bar_size)
+ return 1 << bar_size;
+ return 0;
+}
+
int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn)
{
+ struct qed_hwfn *p_lead = QED_LEADING_HWFN(p_hwfn->cdev);
struct qed_vf_iov *p_iov;
u32 reg;
+ int rc;
/* Set number of hwfns - might be overriden once leading hwfn learns
* actual configuration from PF.
@@ -349,10 +454,6 @@ int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn)
if (IS_LEAD_HWFN(p_hwfn))
p_hwfn->cdev->num_hwfns = 1;
- /* Set the doorbell bar. Assumption: regview is set */
- p_hwfn->doorbells = (u8 __iomem *)p_hwfn->regview +
- PXP_VF_BAR0_START_DQ;
-
reg = PXP_VF_BAR0_ME_OPAQUE_ADDRESS;
p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, reg);
@@ -364,6 +465,30 @@ int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn)
if (!p_iov)
return -ENOMEM;
+ /* Doorbells are tricky; the upper layer has already set the hwfn
+ * doorbell value, but there are several incompatibility scenarios
+ * where that would be incorrect and we'd need to override it.
+ */
+ if (!p_hwfn->doorbells) {
+ p_hwfn->doorbells = (u8 __iomem *)p_hwfn->regview +
+ PXP_VF_BAR0_START_DQ;
+ } else if (p_hwfn == p_lead) {
+ /* For the leading hw-function, the value is always correct, but we
+ * need to handle the scenario where a legacy PF does not support
+ * 100g mapped bars later.
+ */
+ p_iov->b_doorbell_bar = true;
+ } else {
+ /* here, value would be correct ONLY if the leading hwfn
+ * received indication that mapped-bars are supported.
+ */
+ if (p_lead->vf_iov_info->b_doorbell_bar)
+ p_iov->b_doorbell_bar = true;
+ else
+ p_hwfn->doorbells = (u8 __iomem *)
+ p_hwfn->regview + PXP_VF_BAR0_START_DQ;
+ }
+
/* Allocate vf2pf msg */
p_iov->vf2pf_request = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
sizeof(union vfpf_tlvs),
@@ -403,7 +528,33 @@ int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn)
p_hwfn->hw_info.personality = QED_PCI_ETH;
- return qed_vf_pf_acquire(p_hwfn);
+ rc = qed_vf_pf_acquire(p_hwfn);
+
+ /* If VF is 100g using a mapped bar and PF is too old to support that,
+ * acquisition would succeed - but the VF would have no way knowing
+ * the size of the doorbell bar configured in HW and thus will not
+ * know how to split it for 2nd hw-function.
+ * In this case we re-try without the indication of the mapped
+ * doorbell.
+ */
+ if (!rc && p_iov->b_doorbell_bar &&
+ !qed_vf_hw_bar_size(p_hwfn, BAR_ID_1) &&
+ (p_hwfn->cdev->num_hwfns > 1)) {
+ rc = _qed_vf_pf_release(p_hwfn, false);
+ if (rc)
+ return rc;
+
+ p_iov->b_doorbell_bar = false;
+ p_hwfn->doorbells = (u8 __iomem *)p_hwfn->regview +
+ PXP_VF_BAR0_START_DQ;
+ rc = qed_vf_pf_acquire(p_hwfn);
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "Regview [%p], Doorbell [%p], Device-doorbell [%p]\n",
+ p_hwfn->regview, p_hwfn->doorbells, p_hwfn->cdev->doorbells);
+
+ return rc;
free_vf2pf_request:
dma_free_coherent(&p_hwfn->cdev->pdev->dev,
@@ -588,8 +739,8 @@ qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
req->cqe_pbl_addr = cqe_pbl_addr;
req->cqe_pbl_size = cqe_pbl_size;
req->rxq_addr = bd_chain_phys_addr;
- req->hw_sb = p_cid->rel.sb;
- req->sb_index = p_cid->rel.sb_idx;
+ req->hw_sb = p_cid->sb_igu_id;
+ req->sb_index = p_cid->sb_idx;
req->bd_max_bytes = bd_max_bytes;
req->stat_id = -1;
@@ -609,6 +760,9 @@ qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
__internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
(u32 *)(&init_prod_val));
}
+
+ qed_vf_pf_add_qid(p_hwfn, p_cid);
+
/* add list termination tlv */
qed_add_tlv(p_hwfn, &p_iov->offset,
CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
@@ -657,6 +811,8 @@ int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn,
req->num_rxqs = 1;
req->cqe_completion = cqe_completion;
+ qed_vf_pf_add_qid(p_hwfn, p_cid);
+
/* add list termination tlv */
qed_add_tlv(p_hwfn, &p_iov->offset,
CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
@@ -697,8 +853,10 @@ qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
/* Tx */
req->pbl_addr = pbl_addr;
req->pbl_size = pbl_size;
- req->hw_sb = p_cid->rel.sb;
- req->sb_index = p_cid->rel.sb_idx;
+ req->hw_sb = p_cid->sb_igu_id;
+ req->sb_index = p_cid->sb_idx;
+
+ qed_vf_pf_add_qid(p_hwfn, p_cid);
/* add list termination tlv */
qed_add_tlv(p_hwfn, &p_iov->offset,
@@ -728,8 +886,8 @@ qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
}
DP_VERBOSE(p_hwfn, QED_MSG_IOV,
- "Txq[0x%02x]: doorbell at %p [offset 0x%08x]\n",
- qid, *pp_doorbell, resp->offset);
+ "Txq[0x%02x.%02x]: doorbell at %p [offset 0x%08x]\n",
+ qid, p_cid->qid_usage_idx, *pp_doorbell, resp->offset);
exit:
qed_vf_pf_req_end(p_hwfn, rc);
@@ -749,6 +907,8 @@ int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid)
req->tx_qid = p_cid->rel.queue_id;
req->num_txqs = 1;
+ qed_vf_pf_add_qid(p_hwfn, p_cid);
+
/* add list termination tlv */
qed_add_tlv(p_hwfn, &p_iov->offset,
CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
@@ -792,9 +952,12 @@ int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn,
req->only_untagged = only_untagged;
/* status blocks */
- for (i = 0; i < p_hwfn->vf_iov_info->acquire_resp.resc.num_sbs; i++)
- if (p_hwfn->sbs_info[i])
- req->sb_addr[i] = p_hwfn->sbs_info[i]->sb_phys;
+ for (i = 0; i < p_hwfn->vf_iov_info->acquire_resp.resc.num_sbs; i++) {
+ struct qed_sb_info *p_sb = p_hwfn->vf_iov_info->sbs_info[i];
+
+ if (p_sb)
+ req->sb_addr[i] = p_sb->sb_phys;
+ }
/* add list termination tlv */
qed_add_tlv(p_hwfn, &p_iov->offset,
@@ -1095,54 +1258,6 @@ exit:
return rc;
}
-int qed_vf_pf_release(struct qed_hwfn *p_hwfn)
-{
- struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
- struct pfvf_def_resp_tlv *resp;
- struct vfpf_first_tlv *req;
- u32 size;
- int rc;
-
- /* clear mailbox and prep first tlv */
- req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_RELEASE, sizeof(*req));
-
- /* add list termination tlv */
- qed_add_tlv(p_hwfn, &p_iov->offset,
- CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
-
- resp = &p_iov->pf2vf_reply->default_resp;
- rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
-
- if (!rc && resp->hdr.status != PFVF_STATUS_SUCCESS)
- rc = -EAGAIN;
-
- qed_vf_pf_req_end(p_hwfn, rc);
-
- p_hwfn->b_int_enabled = 0;
-
- if (p_iov->vf2pf_request)
- dma_free_coherent(&p_hwfn->cdev->pdev->dev,
- sizeof(union vfpf_tlvs),
- p_iov->vf2pf_request,
- p_iov->vf2pf_request_phys);
- if (p_iov->pf2vf_reply)
- dma_free_coherent(&p_hwfn->cdev->pdev->dev,
- sizeof(union pfvf_tlvs),
- p_iov->pf2vf_reply, p_iov->pf2vf_reply_phys);
-
- if (p_iov->bulletin.p_virt) {
- size = sizeof(struct qed_bulletin_content);
- dma_free_coherent(&p_hwfn->cdev->pdev->dev,
- size,
- p_iov->bulletin.p_virt, p_iov->bulletin.phys);
- }
-
- kfree(p_hwfn->vf_iov_info);
- p_hwfn->vf_iov_info = NULL;
-
- return rc;
-}
-
void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn,
struct qed_filter_mcast *p_filter_cmd)
{
@@ -1240,6 +1355,24 @@ u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
return p_iov->acquire_resp.resc.hw_sbs[sb_id].hw_sb_id;
}
+void qed_vf_set_sb_info(struct qed_hwfn *p_hwfn,
+ u16 sb_id, struct qed_sb_info *p_sb)
+{
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+
+ if (!p_iov) {
+ DP_NOTICE(p_hwfn, "vf_sriov_info isn't initialized\n");
+ return;
+ }
+
+ if (sb_id >= PFVF_MAX_SBS_PER_VF) {
+ DP_NOTICE(p_hwfn, "Can't configure SB %04x\n", sb_id);
+ return;
+ }
+
+ p_iov->sbs_info[sb_id] = p_sb;
+}
+
int qed_vf_read_bulletin(struct qed_hwfn *p_hwfn, u8 *p_change)
{
struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
@@ -1342,6 +1475,16 @@ void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs)
*num_rxqs = p_hwfn->vf_iov_info->acquire_resp.resc.num_rxqs;
}
+void qed_vf_get_num_txqs(struct qed_hwfn *p_hwfn, u8 *num_txqs)
+{
+ *num_txqs = p_hwfn->vf_iov_info->acquire_resp.resc.num_txqs;
+}
+
+void qed_vf_get_num_cids(struct qed_hwfn *p_hwfn, u8 *num_cids)
+{
+ *num_cids = p_hwfn->vf_iov_info->acquire_resp.resc.num_cids;
+}
+
void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac)
{
memcpy(port_mac,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.h b/drivers/net/ethernet/qlogic/qed/qed_vf.h
index 34ac70b0e5fe..34d9b882a780 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_vf.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_vf.h
@@ -46,7 +46,8 @@ struct vf_pf_resc_request {
u8 num_mac_filters;
u8 num_vlan_filters;
u8 num_mc_filters;
- u16 padding;
+ u8 num_cids;
+ u8 padding;
};
struct hw_sb_info {
@@ -113,6 +114,17 @@ struct vfpf_acquire_tlv {
struct vf_pf_vfdev_info {
#define VFPF_ACQUIRE_CAP_PRE_FP_HSI (1 << 0) /* VF pre-FP hsi version */
#define VFPF_ACQUIRE_CAP_100G (1 << 1) /* VF can support 100g */
+ /* A requirement for supporting multi-Tx queues on a single queue-zone:
+ * the VF passes qids as additional information whenever passing queue
+ * references.
+ */
+#define VFPF_ACQUIRE_CAP_QUEUE_QIDS BIT(2)
+
+ /* The VF is using the physical bar. While this is mostly internal
+ * to the VF, might affect the number of CIDs supported assuming
+ * QUEUE_QIDS is set.
+ */
+#define VFPF_ACQUIRE_CAP_PHYSICAL_BAR BIT(3)
u64 capabilities;
u8 fw_major;
u8 fw_minor;
@@ -185,6 +197,9 @@ struct pfvf_acquire_resp_tlv {
*/
#define PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE BIT(2)
+ /* PF expects queues to be received with additional qids */
+#define PFVF_ACQUIRE_CAP_QUEUE_QIDS BIT(3)
+
u16 db_size;
u8 indices_per_sb;
u8 os_type;
@@ -193,7 +208,8 @@ struct pfvf_acquire_resp_tlv {
u16 chip_rev;
u8 dev_type;
- u8 padding;
+ /* Doorbell bar size configured in HW: log2(size) or 0 */
+ u8 bar_size;
struct pfvf_stats_info stats_info;
@@ -221,7 +237,8 @@ struct pfvf_acquire_resp_tlv {
u8 num_mac_filters;
u8 num_vlan_filters;
u8 num_mc_filters;
- u8 padding[2];
+ u8 num_cids;
+ u8 padding;
} resc;
u32 bulletin_size;
@@ -234,6 +251,16 @@ struct pfvf_start_queue_resp_tlv {
u8 padding[4];
};
+/* Extended queue information - additional index for reference inside qzone.
+ * If communicated between VF/PF, each TLV relating to queues should be
+ * extended by one such [or have a future base TLV that already contains info].
+ */
+struct vfpf_qid_tlv {
+ struct channel_tlv tl;
+ u8 qid;
+ u8 padding[3];
+};
+
/* Setup Queue */
struct vfpf_start_rxq_tlv {
struct vfpf_first_tlv first_tlv;
@@ -597,6 +624,8 @@ enum {
CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN,
CHANNEL_TLV_VPORT_UPDATE_SGE_TPA,
CHANNEL_TLV_UPDATE_TUNN_PARAM,
+ CHANNEL_TLV_RESERVED,
+ CHANNEL_TLV_QID,
CHANNEL_TLV_MAX,
/* Required for iterating over vport-update tlvs.
@@ -605,6 +634,12 @@ enum {
CHANNEL_TLV_VPORT_UPDATE_MAX = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA + 1,
};
+/* Number of CIDs [total of both Rx and Tx] to be requested by
+ * default, and the maximum possible number.
+ */
+#define QED_ETH_VF_DEFAULT_NUM_CIDS (32)
+#define QED_ETH_VF_MAX_NUM_CIDS (250)
+
/* This data is held in the qed_hwfn structure for VFs only. */
struct qed_vf_iov {
union vfpf_tlvs *vf2pf_request;
@@ -627,6 +662,19 @@ struct qed_vf_iov {
* this has to be propagated as it affects the fastpath.
*/
bool b_pre_fp_hsi;
+
+ /* Current day VFs are passing the SBs' physical addresses on vport
+ * start, and as they lack an IGU mapping they need to store the
+ * addresses of previously registered SBs.
+ * Even if we were to change configuration flow, due to backward
+ * compatibility [with older PFs] we'd still need to store these.
+ */
+ struct qed_sb_info *sbs_info[PFVF_MAX_SBS_PER_VF];
+
+ /* Determines whether VF utilizes doorbells via limited register
+ * bar or via the doorbell bar.
+ */
+ bool b_doorbell_bar;
};
#ifdef CONFIG_QED_SRIOV
@@ -676,6 +724,22 @@ void qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs);
/**
+ * @brief Get number of Tx queues allocated for VF by qed
+ *
+ * @param p_hwfn
+ * @param num_txqs - allocated TX queues
+ */
+void qed_vf_get_num_txqs(struct qed_hwfn *p_hwfn, u8 *num_txqs);
+
+/**
+ * @brief Get number of available connections [both Rx and Tx] for VF
+ *
+ * @param p_hwfn
+ * @param num_cids - allocated number of connections
+ */
+void qed_vf_get_num_cids(struct qed_hwfn *p_hwfn, u8 *num_cids);
+
+/**
* @brief Get port mac address for VF
*
* @param p_hwfn
@@ -837,6 +901,16 @@ int qed_vf_pf_release(struct qed_hwfn *p_hwfn);
u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id);
/**
+ * @brief Stores [or removes] a configured sb_info.
+ *
+ * @param p_hwfn
+ * @param sb_id - zero-based SB index [for fastpath]
+ * @param sb_info - may be NULL [during removal].
+ */
+void qed_vf_set_sb_info(struct qed_hwfn *p_hwfn,
+ u16 sb_id, struct qed_sb_info *p_sb);
+
+/**
* @brief qed_vf_pf_vport_start - perform vport start for VF.
*
* @param p_hwfn
@@ -917,6 +991,8 @@ void qed_iov_vf_task(struct work_struct *work);
void qed_vf_set_vf_start_tunn_update_param(struct qed_tunnel_info *p_tun);
int qed_vf_pf_tunnel_param_update(struct qed_hwfn *p_hwfn,
struct qed_tunnel_info *p_tunn);
+
+u32 qed_vf_hw_bar_size(struct qed_hwfn *p_hwfn, enum BAR_ID bar_id);
#else
static inline void qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
struct qed_mcp_link_params *params)
@@ -938,6 +1014,14 @@ static inline void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs)
{
}
+static inline void qed_vf_get_num_txqs(struct qed_hwfn *p_hwfn, u8 *num_txqs)
+{
+}
+
+static inline void qed_vf_get_num_cids(struct qed_hwfn *p_hwfn, u8 *num_cids)
+{
+}
+
static inline void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac)
{
}
@@ -1021,6 +1105,11 @@ static inline u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
return 0;
}
+static inline void qed_vf_set_sb_info(struct qed_hwfn *p_hwfn, u16 sb_id,
+ struct qed_sb_info *p_sb)
+{
+}
+
static inline int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn,
u8 vport_id,
u16 mtu,
@@ -1089,6 +1178,13 @@ static inline int qed_vf_pf_tunnel_param_update(struct qed_hwfn *p_hwfn,
{
return -EINVAL;
}
+
+static inline u32
+qed_vf_hw_bar_size(struct qed_hwfn *p_hwfn,
+ enum BAR_ID bar_id)
+{
+ return 0;
+}
#endif
#endif
diff --git a/drivers/net/ethernet/qlogic/qede/Makefile b/drivers/net/ethernet/qlogic/qede/Makefile
index bc5f7c3b277d..75408fbb7680 100644
--- a/drivers/net/ethernet/qlogic/qede/Makefile
+++ b/drivers/net/ethernet/qlogic/qede/Makefile
@@ -2,4 +2,4 @@ obj-$(CONFIG_QEDE) := qede.o
qede-y := qede_main.o qede_fp.o qede_filter.o qede_ethtool.o qede_ptp.o
qede-$(CONFIG_DCB) += qede_dcbnl.o
-qede-$(CONFIG_QED_RDMA) += qede_roce.o
+qede-$(CONFIG_QED_RDMA) += qede_rdma.o
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
index 9b4f08b6f9b9..4dfb238221f9 100644
--- a/drivers/net/ethernet/qlogic/qede/qede.h
+++ b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -40,6 +40,7 @@
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/bpf.h>
+#include <linux/qed/qede_rdma.h>
#include <linux/io.h>
#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
@@ -153,8 +154,8 @@ struct qede_vlan {
struct qede_rdma_dev {
struct qedr_dev *qedr_dev;
struct list_head entry;
- struct list_head roce_event_list;
- struct workqueue_struct *roce_wq;
+ struct list_head rdma_event_list;
+ struct workqueue_struct *rdma_wq;
};
struct qede_ptp;
@@ -197,7 +198,6 @@ struct qede_dev {
#define QEDE_TSS_COUNT(edev) ((edev)->num_queues - (edev)->fp_num_rx)
struct qed_int_info int_info;
- unsigned char primary_mac[ETH_ALEN];
/* Smaller private variant of the RTNL lock */
struct mutex qede_lock;
diff --git a/drivers/net/ethernet/qlogic/qede/qede_dcbnl.c b/drivers/net/ethernet/qlogic/qede/qede_dcbnl.c
index a9e7379313db..6e7747b9b95e 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_dcbnl.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_dcbnl.c
@@ -313,7 +313,6 @@ static const struct dcbnl_rtnl_ops qede_dcbnl_ops = {
.ieee_setets = qede_dcbnl_ieee_setets,
.ieee_getapp = qede_dcbnl_ieee_getapp,
.ieee_setapp = qede_dcbnl_ieee_setapp,
- .getdcbx = qede_dcbnl_getdcbx,
.ieee_peer_getpfc = qede_dcbnl_ieee_peer_getpfc,
.ieee_peer_getets = qede_dcbnl_ieee_peer_getets,
.getstate = qede_dcbnl_getstate,
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index 172b292241a5..6a03d3e66cff 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -506,6 +506,14 @@ static int qede_set_link_ksettings(struct net_device *dev,
params.autoneg = false;
params.forced_speed = base->speed;
switch (base->speed) {
+ case SPEED_1000:
+ if (!(current_link.supported_caps &
+ QED_LM_1000baseT_Full_BIT)) {
+ DP_INFO(edev, "1G speed not supported\n");
+ return -EINVAL;
+ }
+ params.adv_speeds = QED_LM_1000baseT_Full_BIT;
+ break;
case SPEED_10000:
if (!(current_link.supported_caps &
QED_LM_10000baseKR_Full_BIT)) {
@@ -1282,7 +1290,8 @@ static int qede_selftest_transmit_traffic(struct qede_dev *edev,
struct qede_tx_queue *txq = NULL;
struct eth_tx_1st_bd *first_bd;
dma_addr_t mapping;
- int i, idx, val;
+ int i, idx;
+ u16 val;
for_each_queue(i) {
if (edev->fp_array[i].type & QEDE_FASTPATH_TX) {
@@ -1297,14 +1306,15 @@ static int qede_selftest_transmit_traffic(struct qede_dev *edev,
}
/* Fill the entry in the SW ring and the BDs in the FW ring */
- idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
+ idx = txq->sw_tx_prod;
txq->sw_tx_ring.skbs[idx].skb = skb;
first_bd = qed_chain_produce(&txq->tx_pbl);
memset(first_bd, 0, sizeof(*first_bd));
val = 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
first_bd->data.bd_flags.bitfields = val;
val = skb->len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK;
- first_bd->data.bitfields |= (val << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT);
+ val = val << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
+ first_bd->data.bitfields |= cpu_to_le16(val);
/* Map skb linear data for DMA and set in the first BD */
mapping = dma_map_single(&edev->pdev->dev, skb->data,
@@ -1317,10 +1327,10 @@ static int qede_selftest_transmit_traffic(struct qede_dev *edev,
/* update the first BD with the actual num BDs */
first_bd->data.nbds = 1;
- txq->sw_tx_prod++;
+ txq->sw_tx_prod = (txq->sw_tx_prod + 1) % txq->num_tx_buffers;
/* 'next page' entries are counted in the producer value */
- val = cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl));
- txq->tx_db.data.bd_prod = val;
+ val = qed_chain_get_prod_idx(&txq->tx_pbl);
+ txq->tx_db.data.bd_prod = cpu_to_le16(val);
/* wmb makes sure that the BDs data is updated before updating the
* producer, otherwise FW may read old data from the BDs.
@@ -1351,7 +1361,7 @@ static int qede_selftest_transmit_traffic(struct qede_dev *edev,
first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl);
dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
BD_UNMAP_LEN(first_bd), DMA_TO_DEVICE);
- txq->sw_tx_cons++;
+ txq->sw_tx_cons = (txq->sw_tx_cons + 1) % txq->num_tx_buffers;
txq->sw_tx_ring.skbs[idx].skb = NULL;
return 0;
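
The sw_tx_prod/sw_tx_cons changes in this hunk (and the matching ones in qede_fp.c further down) swap a power-of-two mask for a modulo on the runtime ring size; a minimal stand-alone model:

#include <stdio.h>

/* Indices now wrap at num_tx_buffers via modulo instead of an AND
 * with NUM_TX_BDS_MAX, so the ring size no longer has to be a power
 * of two.
 */
static unsigned short ring_next(unsigned short idx, unsigned short size)
{
	return (idx + 1) % size;
}

int main(void)
{
	printf("%u\n", ring_next(99, 100)); /* wraps to 0 */
	return 0;
}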
diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c
index 333876c19d7d..f939db5bac5f 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_filter.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c
@@ -495,12 +495,16 @@ void qede_force_mac(void *dev, u8 *mac, bool forced)
{
struct qede_dev *edev = dev;
+ __qede_lock(edev);
+
/* MAC hints take effect only if we haven't set one already */
- if (is_valid_ether_addr(edev->ndev->dev_addr) && !forced)
+ if (is_valid_ether_addr(edev->ndev->dev_addr) && !forced) {
+ __qede_unlock(edev);
return;
+ }
ether_addr_copy(edev->ndev->dev_addr, mac);
- ether_addr_copy(edev->primary_mac, mac);
+ __qede_unlock(edev);
}
void qede_fill_rss_params(struct qede_dev *edev,
@@ -1033,6 +1037,7 @@ int qede_xdp(struct net_device *dev, struct netdev_xdp *xdp)
return qede_xdp_set(edev, xdp->prog);
case XDP_QUERY_PROG:
xdp->prog_attached = !!edev->xdp_prog;
+ xdp->prog_id = edev->xdp_prog ? edev->xdp_prog->aux->id : 0;
return 0;
default:
return -EINVAL;
@@ -1061,41 +1066,51 @@ int qede_set_mac_addr(struct net_device *ndev, void *p)
{
struct qede_dev *edev = netdev_priv(ndev);
struct sockaddr *addr = p;
- int rc;
-
- ASSERT_RTNL(); /* @@@TBD To be removed */
+ int rc = 0;
- DP_INFO(edev, "Set_mac_addr called\n");
+ /* Make sure the state doesn't transition while changing the MAC.
+ * Also, all flows accessing the dev_addr field are doing that under
+ * this lock.
+ */
+ __qede_lock(edev);
if (!is_valid_ether_addr(addr->sa_data)) {
DP_NOTICE(edev, "The MAC address is not valid\n");
- return -EFAULT;
+ rc = -EFAULT;
+ goto out;
}
if (!edev->ops->check_mac(edev->cdev, addr->sa_data)) {
- DP_NOTICE(edev, "qed prevents setting MAC\n");
- return -EINVAL;
+ DP_NOTICE(edev, "qed prevents setting MAC %pM\n",
+ addr->sa_data);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ if (edev->state == QEDE_STATE_OPEN) {
+ /* Remove the previous primary mac */
+ rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL,
+ ndev->dev_addr);
+ if (rc)
+ goto out;
}
ether_addr_copy(ndev->dev_addr, addr->sa_data);
+ DP_INFO(edev, "Setting device MAC to %pM\n", addr->sa_data);
- if (!netif_running(ndev)) {
- DP_NOTICE(edev, "The device is currently down\n");
- return 0;
+ if (edev->state != QEDE_STATE_OPEN) {
+ DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
+ "The device is currently down\n");
+ goto out;
}
- /* Remove the previous primary mac */
- rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL,
- edev->primary_mac);
- if (rc)
- return rc;
-
- edev->ops->common->update_mac(edev->cdev, addr->sa_data);
+ edev->ops->common->update_mac(edev->cdev, ndev->dev_addr);
- /* Add MAC filter according to the new unicast HW MAC address */
- ether_addr_copy(edev->primary_mac, ndev->dev_addr);
- return qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD,
- edev->primary_mac);
+ rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD,
+ ndev->dev_addr);
+out:
+ __qede_unlock(edev);
+ return rc;
}
static int
@@ -1200,7 +1215,7 @@ void qede_config_rx_mode(struct net_device *ndev)
* (configure / leave the primary mac)
*/
rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_REPLACE,
- edev->primary_mac);
+ edev->ndev->dev_addr);
if (rc)
goto out;
diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c
index 7b6f41d06245..6fc854b120b0 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_fp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c
@@ -99,7 +99,7 @@ int qede_alloc_rx_buffer(struct qede_rx_queue *rxq, bool allow_lazy)
/* Unmap the data and free skb */
int qede_free_tx_pkt(struct qede_dev *edev, struct qede_tx_queue *txq, int *len)
{
- u16 idx = txq->sw_tx_cons & NUM_TX_BDS_MAX;
+ u16 idx = txq->sw_tx_cons;
struct sk_buff *skb = txq->sw_tx_ring.skbs[idx].skb;
struct eth_tx_1st_bd *first_bd;
struct eth_tx_bd *tx_data_bd;
@@ -156,7 +156,7 @@ static void qede_free_failed_tx_pkt(struct qede_tx_queue *txq,
struct eth_tx_1st_bd *first_bd,
int nbd, bool data_split)
{
- u16 idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
+ u16 idx = txq->sw_tx_prod;
struct sk_buff *skb = txq->sw_tx_ring.skbs[idx].skb;
struct eth_tx_bd *tx_data_bd;
int i, split_bd_len = 0;
@@ -333,8 +333,9 @@ static int qede_xdp_xmit(struct qede_dev *edev, struct qede_fastpath *fp,
struct sw_rx_data *metadata, u16 padding, u16 length)
{
struct qede_tx_queue *txq = fp->xdp_tx;
- u16 idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
struct eth_tx_1st_bd *first_bd;
+ u16 idx = txq->sw_tx_prod;
+ u16 val;
if (!qed_chain_get_elem_left(&txq->tx_pbl)) {
txq->stopped_cnt++;
@@ -346,9 +347,11 @@ static int qede_xdp_xmit(struct qede_dev *edev, struct qede_fastpath *fp,
memset(first_bd, 0, sizeof(*first_bd));
first_bd->data.bd_flags.bitfields =
BIT(ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT);
- first_bd->data.bitfields |=
- (length & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) <<
- ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
+
+ val = (length & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) <<
+ ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
+
+ first_bd->data.bitfields |= cpu_to_le16(val);
first_bd->data.nbds = 1;
/* We can safely ignore the offset, as it's 0 for XDP */
@@ -363,7 +366,7 @@ static int qede_xdp_xmit(struct qede_dev *edev, struct qede_fastpath *fp,
txq->sw_tx_ring.xdp[idx].page = metadata->data;
txq->sw_tx_ring.xdp[idx].mapping = metadata->mapping;
- txq->sw_tx_prod++;
+ txq->sw_tx_prod = (txq->sw_tx_prod + 1) % txq->num_tx_buffers;
/* Mark the fastpath for future XDP doorbell */
fp->xdp_xmit = 1;
@@ -393,14 +396,14 @@ static void qede_xdp_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq)
while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) {
qed_chain_consume(&txq->tx_pbl);
- idx = txq->sw_tx_cons & NUM_TX_BDS_MAX;
+ idx = txq->sw_tx_cons;
dma_unmap_page(&edev->pdev->dev,
txq->sw_tx_ring.xdp[idx].mapping,
PAGE_SIZE, DMA_BIDIRECTIONAL);
__free_page(txq->sw_tx_ring.xdp[idx].page);
- txq->sw_tx_cons++;
+ txq->sw_tx_cons = (txq->sw_tx_cons + 1) % txq->num_tx_buffers;
txq->xmit_pkts++;
}
}
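
Several hunks in qede_ethtool.c and qede_fp.c share one pattern: flag bits are accumulated in a host-order u16 and stored with a single cpu_to_le16() at the end, instead of OR-ing host-order constants into the __le16 descriptor field. A stand-alone model with invented shift values; the identity conversion below stands in for the kernel helper, which byte-swaps on big-endian hosts:

#include <stdint.h>
#include <stdio.h>

/* cpu_to_le16 modeled as identity (little-endian host); in the kernel
 * it byte-swaps on big-endian hosts, which is why mixing host-order
 * ORs directly into an __le16 field was a bug.
 */
static uint16_t model_cpu_to_le16(uint16_t v) { return v; }

int main(void)
{
	uint16_t val = 0;

	val |= 1u << 11;          /* hypothetical tunnel-flag bit      */
	val |= (1500u & 0x7FF);   /* hypothetical packet-length field  */
	printf("0x%04x\n", model_cpu_to_le16(val)); /* one LE store */
	return 0;
}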
@@ -430,7 +433,7 @@ static int qede_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq)
bytes_compl += len;
pkts_compl++;
- txq->sw_tx_cons++;
+ txq->sw_tx_cons = (txq->sw_tx_cons + 1) % txq->num_tx_buffers;
txq->xmit_pkts++;
}
@@ -1076,8 +1079,7 @@ static struct sk_buff *qede_rx_allocate_skb(struct qede_dev *edev,
* re-use the already allocated & mapped memory.
*/
if (len + pad <= edev->rx_copybreak) {
- memcpy(skb_put(skb, len),
- page_address(page) + offset, len);
+ skb_put_data(skb, page_address(page) + offset, len);
qede_reuse_page(rxq, bd);
goto out;
}
@@ -1424,7 +1426,7 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev)
struct eth_tx_2nd_bd *second_bd = NULL;
struct eth_tx_3rd_bd *third_bd = NULL;
struct eth_tx_bd *tx_data_bd = NULL;
- u16 txq_index;
+ u16 txq_index, val = 0;
u8 nbd = 0;
dma_addr_t mapping;
int rc, frag_idx = 0, ipv6_ext = 0;
@@ -1455,7 +1457,7 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev)
#endif
/* Fill the entry in the SW ring and the BDs in the FW ring */
- idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
+ idx = txq->sw_tx_prod;
txq->sw_tx_ring.skbs[idx].skb = skb;
first_bd = (struct eth_tx_1st_bd *)
qed_chain_produce(&txq->tx_pbl);
@@ -1513,8 +1515,8 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev)
if (xmit_type & XMIT_ENC) {
first_bd->data.bd_flags.bitfields |=
1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
- first_bd->data.bitfields |=
- 1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
+
+ val |= (1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT);
}
/* Legacy FW had flipped behavior in regard to this bit -
@@ -1522,8 +1524,7 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev)
* packets when it didn't need to.
*/
if (unlikely(txq->is_legacy))
- first_bd->data.bitfields ^=
- 1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
+ val ^= (1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT);
/* If the packet is IPv6 with extension header, indicate that
* to FW and pass a few params, since the device cracker doesn't
@@ -1587,11 +1588,12 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev)
data_split = true;
}
} else {
- first_bd->data.bitfields |=
- (skb->len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) <<
- ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
+ val |= ((skb->len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) <<
+ ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT);
}
+ first_bd->data.bitfields = cpu_to_le16(val);
+
/* Handle fragmented skb */
/* special handle for frags inside 2nd and 3rd bds.. */
while (tx_data_bd && frag_idx < skb_shinfo(skb)->nr_frags) {
@@ -1639,7 +1641,7 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev)
/* Advance packet producer only before sending the packet since mapping
* of pages may fail.
*/
- txq->sw_tx_prod++;
+ txq->sw_tx_prod = (txq->sw_tx_prod + 1) % txq->num_tx_buffers;
/* 'next page' entries are counted in the producer value */
txq->tx_db.data.bd_prod =
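The qede_fp.c hunks above drop the `& NUM_TX_BDS_MAX` power-of-two mask and instead keep the software producer/consumer indices bounded by incrementing them modulo txq->num_tx_buffers. Masking only wraps correctly when the ring size is a power of two; the modulo form also handles the now per-queue, configurable ring sizes. A minimal standalone sketch of the two wrapping schemes (function names are illustrative, not from the driver; u16 comes from <linux/types.h>):

	/* advance with a mask: valid only when size is a power of two */
	static u16 ring_next_mask(u16 idx, u16 size_pow2)
	{
		return (idx + 1) & (size_pow2 - 1);
	}

	/* advance with a modulo: valid for any ring size */
	static u16 ring_next_mod(u16 idx, u16 num_buffers)
	{
		return (idx + 1) % num_buffers;
	}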
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 38b77bbfe4ee..06ca13dd9ddb 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -60,7 +60,6 @@
#include <net/ip6_checksum.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>
-#include <linux/qed/qede_roce.h>
#include "qede.h"
#include "qede_ptp.h"
@@ -259,11 +258,11 @@ static int qede_netdev_event(struct notifier_block *this, unsigned long event,
/* Notify qed of the name change */
if (!edev->ops || !edev->ops->common)
goto done;
- edev->ops->common->set_id(edev->cdev, edev->ndev->name, "qede");
+ edev->ops->common->set_name(edev->cdev, edev->ndev->name);
break;
case NETDEV_CHANGEADDR:
edev = netdev_priv(ndev);
- qede_roce_event_changeaddr(edev);
+ qede_rdma_event_changeaddr(edev);
break;
}
@@ -580,6 +579,24 @@ static const struct net_device_ops qede_netdev_vf_ops = {
.ndo_features_check = qede_features_check,
};
+static const struct net_device_ops qede_netdev_vf_xdp_ops = {
+ .ndo_open = qede_open,
+ .ndo_stop = qede_close,
+ .ndo_start_xmit = qede_start_xmit,
+ .ndo_set_rx_mode = qede_set_rx_mode,
+ .ndo_set_mac_address = qede_set_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_change_mtu = qede_change_mtu,
+ .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
+ .ndo_set_features = qede_set_features,
+ .ndo_get_stats64 = qede_get_stats64,
+ .ndo_udp_tunnel_add = qede_udp_tunnel_add,
+ .ndo_udp_tunnel_del = qede_udp_tunnel_del,
+ .ndo_features_check = qede_features_check,
+ .ndo_xdp = qede_xdp,
+};
+
/* -------------------------------------------------------------------------
* START OF PROBE / REMOVE
* -------------------------------------------------------------------------
@@ -618,6 +635,12 @@ static struct qede_dev *qede_alloc_etherdev(struct qed_dev *cdev,
memset(&edev->stats, 0, sizeof(edev->stats));
memcpy(&edev->dev_info, info, sizeof(*info));
+ /* As ethtool doesn't have the ability to show WoL behavior as
+ * 'default', if the device supports it declare it as enabled.
+ */
+ if (edev->dev_info.common.wol_support)
+ edev->wol_enabled = true;
+
INIT_LIST_HEAD(&edev->vlan_list);
return edev;
@@ -639,10 +662,14 @@ static void qede_init_ndev(struct qede_dev *edev)
ndev->watchdog_timeo = TX_TIMEOUT;
- if (IS_VF(edev))
- ndev->netdev_ops = &qede_netdev_vf_ops;
- else
+ if (IS_VF(edev)) {
+ if (edev->dev_info.xdp_supported)
+ ndev->netdev_ops = &qede_netdev_vf_xdp_ops;
+ else
+ ndev->netdev_ops = &qede_netdev_vf_ops;
+ } else {
ndev->netdev_ops = &qede_netdev_ops;
+ }
qede_set_ethtool_ops(ndev);
@@ -840,12 +867,55 @@ static void qede_update_pf_params(struct qed_dev *cdev)
/* 64 rx + 64 tx + 64 XDP */
memset(&pf_params, 0, sizeof(struct qed_pf_params));
pf_params.eth_pf_params.num_cons = (MAX_SB_PER_PF_MIMD - 1) * 3;
+
+ /* Same for VFs - make sure they'll have sufficient connections
+ * to support XDP Tx queues.
+ */
+ pf_params.eth_pf_params.num_vf_cons = 48;
+
#ifdef CONFIG_RFS_ACCEL
pf_params.eth_pf_params.num_arfs_filters = QEDE_RFS_MAX_FLTR;
#endif
qed_ops->common->update_pf_params(cdev, &pf_params);
}
+#define QEDE_FW_VER_STR_SIZE 80
+
+static void qede_log_probe(struct qede_dev *edev)
+{
+ struct qed_dev_info *p_dev_info = &edev->dev_info.common;
+ u8 buf[QEDE_FW_VER_STR_SIZE];
+ size_t left_size;
+
+ snprintf(buf, QEDE_FW_VER_STR_SIZE,
+ "Storm FW %d.%d.%d.%d, Management FW %d.%d.%d.%d",
+ p_dev_info->fw_major, p_dev_info->fw_minor, p_dev_info->fw_rev,
+ p_dev_info->fw_eng,
+ (p_dev_info->mfw_rev & QED_MFW_VERSION_3_MASK) >>
+ QED_MFW_VERSION_3_OFFSET,
+ (p_dev_info->mfw_rev & QED_MFW_VERSION_2_MASK) >>
+ QED_MFW_VERSION_2_OFFSET,
+ (p_dev_info->mfw_rev & QED_MFW_VERSION_1_MASK) >>
+ QED_MFW_VERSION_1_OFFSET,
+ (p_dev_info->mfw_rev & QED_MFW_VERSION_0_MASK) >>
+ QED_MFW_VERSION_0_OFFSET);
+
+ left_size = QEDE_FW_VER_STR_SIZE - strlen(buf);
+ if (p_dev_info->mbi_version && left_size)
+ snprintf(buf + strlen(buf), left_size,
+ " [MBI %d.%d.%d]",
+ (p_dev_info->mbi_version & QED_MBI_VERSION_2_MASK) >>
+ QED_MBI_VERSION_2_OFFSET,
+ (p_dev_info->mbi_version & QED_MBI_VERSION_1_MASK) >>
+ QED_MBI_VERSION_1_OFFSET,
+ (p_dev_info->mbi_version & QED_MBI_VERSION_0_MASK) >>
+ QED_MBI_VERSION_0_OFFSET);
+
+ pr_info("qede %02x:%02x.%02x: %s [%s]\n", edev->pdev->bus->number,
+ PCI_SLOT(edev->pdev->devfn), PCI_FUNC(edev->pdev->devfn),
+ buf, edev->ndev->name);
+}
+
enum qede_probe_mode {
QEDE_PROBE_NORMAL,
};
@@ -907,7 +977,7 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
qede_init_ndev(edev);
- rc = qede_roce_dev_add(edev);
+ rc = qede_rdma_dev_add(edev);
if (rc)
goto err3;
@@ -924,7 +994,7 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
goto err4;
}
- edev->ops->common->set_id(cdev, edev->ndev->name, DRV_MODULE_VERSION);
+ edev->ops->common->set_name(cdev, edev->ndev->name);
/* PTP not supported on VFs */
if (!is_vf)
@@ -939,12 +1009,11 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
edev->rx_copybreak = QEDE_RX_HDR_SIZE;
- DP_INFO(edev, "Ending successfully qede probe\n");
-
+ qede_log_probe(edev);
return 0;
err4:
- qede_roce_dev_remove(edev);
+ qede_rdma_dev_remove(edev);
err3:
free_netdev(edev->ndev);
err2:
@@ -995,7 +1064,7 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
qede_ptp_disable(edev);
- qede_roce_dev_remove(edev);
+ qede_rdma_dev_remove(edev);
edev->ops->common->set_power_state(cdev, PCI_D0);
@@ -1066,12 +1135,15 @@ static int qede_set_num_queues(struct qede_dev *edev)
return rc;
}
-static void qede_free_mem_sb(struct qede_dev *edev,
- struct qed_sb_info *sb_info)
+static void qede_free_mem_sb(struct qede_dev *edev, struct qed_sb_info *sb_info,
+ u16 sb_id)
{
- if (sb_info->sb_virt)
+ if (sb_info->sb_virt) {
+ edev->ops->common->sb_release(edev->cdev, sb_info, sb_id);
dma_free_coherent(&edev->pdev->dev, sizeof(*sb_info->sb_virt),
(void *)sb_info->sb_virt, sb_info->sb_phys);
+ memset(sb_info, 0, sizeof(*sb_info));
+ }
}
/* This function allocates fast-path status block memory */
@@ -1244,8 +1316,7 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
QED_CHAIN_CNT_TYPE_U16,
RX_RING_SIZE,
sizeof(struct eth_rx_bd),
- &rxq->rx_bd_ring);
-
+ &rxq->rx_bd_ring, NULL);
if (rc)
goto err;
@@ -1256,7 +1327,7 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
QED_CHAIN_CNT_TYPE_U16,
RX_RING_SIZE,
sizeof(union eth_rx_cqe),
- &rxq->rx_comp_ring);
+ &rxq->rx_comp_ring, NULL);
if (rc)
goto err;
@@ -1298,12 +1369,12 @@ static int qede_alloc_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
/* Allocate the parallel driver ring for Tx buffers */
if (txq->is_xdp) {
- size = sizeof(*txq->sw_tx_ring.xdp) * TX_RING_SIZE;
+ size = sizeof(*txq->sw_tx_ring.xdp) * txq->num_tx_buffers;
txq->sw_tx_ring.xdp = kzalloc(size, GFP_KERNEL);
if (!txq->sw_tx_ring.xdp)
goto err;
} else {
- size = sizeof(*txq->sw_tx_ring.skbs) * TX_RING_SIZE;
+ size = sizeof(*txq->sw_tx_ring.skbs) * txq->num_tx_buffers;
txq->sw_tx_ring.skbs = kzalloc(size, GFP_KERNEL);
if (!txq->sw_tx_ring.skbs)
goto err;
@@ -1313,8 +1384,9 @@ static int qede_alloc_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
QED_CHAIN_USE_TO_CONSUME_PRODUCE,
QED_CHAIN_MODE_PBL,
QED_CHAIN_CNT_TYPE_U16,
- TX_RING_SIZE,
- sizeof(*p_virt), &txq->tx_pbl);
+ txq->num_tx_buffers,
+ sizeof(*p_virt),
+ &txq->tx_pbl, NULL);
if (rc)
goto err;
@@ -1328,7 +1400,7 @@ err:
/* This function frees all memory of a single fp */
static void qede_free_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp)
{
- qede_free_mem_sb(edev, fp->sb_info);
+ qede_free_mem_sb(edev, fp->sb_info, fp->id);
if (fp->type & QEDE_FASTPATH_RX)
qede_free_mem_rxq(edev, fp->rxq);
@@ -1725,7 +1797,7 @@ static int qede_start_txq(struct qede_dev *edev,
else
params.queue_id = txq->index;
- params.sb = fp->sb_info->igu_sb_id;
+ params.p_sb = fp->sb_info;
params.sb_idx = sb_idx;
rc = edev->ops->q_tx_start(edev->cdev, rss_id, &params, phys_table,
@@ -1804,7 +1876,7 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
memset(&q_params, 0, sizeof(q_params));
q_params.queue_id = rxq->rxq_id;
q_params.vport_id = 0;
- q_params.sb = fp->sb_info->igu_sb_id;
+ q_params.p_sb = fp->sb_info;
q_params.sb_idx = RX_PI;
p_phys_table =
@@ -1890,9 +1962,10 @@ static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode,
if (!is_locked)
__qede_lock(edev);
- qede_roce_dev_event_close(edev);
edev->state = QEDE_STATE_CLOSED;
+ qede_rdma_dev_event_close(edev);
+
/* Close OS Tx */
netif_tx_disable(edev->ndev);
netif_carrier_off(edev->ndev);
@@ -1988,9 +2061,6 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode,
goto err4;
DP_INFO(edev, "Start VPORT, RXQ and TXQ succeeded\n");
- /* Add primary mac and set Rx filters */
- ether_addr_copy(edev->primary_mac, edev->ndev->dev_addr);
-
/* Program un-configured VLANs */
qede_configure_vlan_filters(edev);
@@ -1999,7 +2069,7 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode,
link_params.link_up = true;
edev->ops->common->set_link(edev->cdev, &link_params);
- qede_roce_dev_event_open(edev);
+ qede_rdma_dev_event_open(edev);
edev->state = QEDE_STATE_OPEN;
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.c b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
index 24f06e2ef43e..9b2280badaf7 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ptp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
@@ -244,6 +244,7 @@ static int qede_ptp_cfg_filters(struct qede_dev *edev)
break;
case HWTSTAMP_FILTER_ALL:
case HWTSTAMP_FILTER_SOME:
+ case HWTSTAMP_FILTER_NTP_ALL:
ptp->rx_filter = HWTSTAMP_FILTER_NONE;
rx_filter = QED_PTP_FILTER_ALL;
break;
diff --git a/drivers/net/ethernet/qlogic/qede/qede_roce.c b/drivers/net/ethernet/qlogic/qede/qede_rdma.c
index f00657ce7c8f..50b142fad6b8 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_roce.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_rdma.c
@@ -33,19 +33,19 @@
#include <linux/netdevice.h>
#include <linux/list.h>
#include <linux/mutex.h>
-#include <linux/qed/qede_roce.h>
+#include <linux/qed/qede_rdma.h>
#include "qede.h"
static struct qedr_driver *qedr_drv;
static LIST_HEAD(qedr_dev_list);
static DEFINE_MUTEX(qedr_dev_list_lock);
-bool qede_roce_supported(struct qede_dev *dev)
+bool qede_rdma_supported(struct qede_dev *dev)
{
return dev->dev_info.common.rdma_supported;
}
-static void _qede_roce_dev_add(struct qede_dev *edev)
+static void _qede_rdma_dev_add(struct qede_dev *edev)
{
if (!qedr_drv)
return;
@@ -54,11 +54,11 @@ static void _qede_roce_dev_add(struct qede_dev *edev)
edev->ndev);
}
-static int qede_roce_create_wq(struct qede_dev *edev)
+static int qede_rdma_create_wq(struct qede_dev *edev)
{
- INIT_LIST_HEAD(&edev->rdma_info.roce_event_list);
- edev->rdma_info.roce_wq = create_singlethread_workqueue("roce_wq");
- if (!edev->rdma_info.roce_wq) {
+ INIT_LIST_HEAD(&edev->rdma_info.rdma_event_list);
+ edev->rdma_info.rdma_wq = create_singlethread_workqueue("rdma_wq");
+ if (!edev->rdma_info.rdma_wq) {
DP_NOTICE(edev, "qedr: Could not create workqueue\n");
return -ENOMEM;
}
@@ -66,14 +66,14 @@ static int qede_roce_create_wq(struct qede_dev *edev)
return 0;
}
-static void qede_roce_cleanup_event(struct qede_dev *edev)
+static void qede_rdma_cleanup_event(struct qede_dev *edev)
{
- struct list_head *head = &edev->rdma_info.roce_event_list;
- struct qede_roce_event_work *event_node;
+ struct list_head *head = &edev->rdma_info.rdma_event_list;
+ struct qede_rdma_event_work *event_node;
- flush_workqueue(edev->rdma_info.roce_wq);
+ flush_workqueue(edev->rdma_info.rdma_wq);
while (!list_empty(head)) {
- event_node = list_entry(head->next, struct qede_roce_event_work,
+ event_node = list_entry(head->next, struct qede_rdma_event_work,
list);
cancel_work_sync(&event_node->work);
list_del(&event_node->list);
@@ -81,85 +81,85 @@ static void qede_roce_cleanup_event(struct qede_dev *edev)
}
}
-static void qede_roce_destroy_wq(struct qede_dev *edev)
+static void qede_rdma_destroy_wq(struct qede_dev *edev)
{
- qede_roce_cleanup_event(edev);
- destroy_workqueue(edev->rdma_info.roce_wq);
+ qede_rdma_cleanup_event(edev);
+ destroy_workqueue(edev->rdma_info.rdma_wq);
}
-int qede_roce_dev_add(struct qede_dev *edev)
+int qede_rdma_dev_add(struct qede_dev *edev)
{
int rc = 0;
- if (qede_roce_supported(edev)) {
- rc = qede_roce_create_wq(edev);
+ if (qede_rdma_supported(edev)) {
+ rc = qede_rdma_create_wq(edev);
if (rc)
return rc;
INIT_LIST_HEAD(&edev->rdma_info.entry);
mutex_lock(&qedr_dev_list_lock);
list_add_tail(&edev->rdma_info.entry, &qedr_dev_list);
- _qede_roce_dev_add(edev);
+ _qede_rdma_dev_add(edev);
mutex_unlock(&qedr_dev_list_lock);
}
return rc;
}
-static void _qede_roce_dev_remove(struct qede_dev *edev)
+static void _qede_rdma_dev_remove(struct qede_dev *edev)
{
if (qedr_drv && qedr_drv->remove && edev->rdma_info.qedr_dev)
qedr_drv->remove(edev->rdma_info.qedr_dev);
edev->rdma_info.qedr_dev = NULL;
}
-void qede_roce_dev_remove(struct qede_dev *edev)
+void qede_rdma_dev_remove(struct qede_dev *edev)
{
- if (!qede_roce_supported(edev))
+ if (!qede_rdma_supported(edev))
return;
- qede_roce_destroy_wq(edev);
+ qede_rdma_destroy_wq(edev);
mutex_lock(&qedr_dev_list_lock);
- _qede_roce_dev_remove(edev);
+ _qede_rdma_dev_remove(edev);
list_del(&edev->rdma_info.entry);
mutex_unlock(&qedr_dev_list_lock);
}
-static void _qede_roce_dev_open(struct qede_dev *edev)
+static void _qede_rdma_dev_open(struct qede_dev *edev)
{
if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify)
qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_UP);
}
-static void qede_roce_dev_open(struct qede_dev *edev)
+static void qede_rdma_dev_open(struct qede_dev *edev)
{
- if (!qede_roce_supported(edev))
+ if (!qede_rdma_supported(edev))
return;
mutex_lock(&qedr_dev_list_lock);
- _qede_roce_dev_open(edev);
+ _qede_rdma_dev_open(edev);
mutex_unlock(&qedr_dev_list_lock);
}
-static void _qede_roce_dev_close(struct qede_dev *edev)
+static void _qede_rdma_dev_close(struct qede_dev *edev)
{
if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify)
qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_DOWN);
}
-static void qede_roce_dev_close(struct qede_dev *edev)
+static void qede_rdma_dev_close(struct qede_dev *edev)
{
- if (!qede_roce_supported(edev))
+ if (!qede_rdma_supported(edev))
return;
mutex_lock(&qedr_dev_list_lock);
- _qede_roce_dev_close(edev);
+ _qede_rdma_dev_close(edev);
mutex_unlock(&qedr_dev_list_lock);
}
-static void qede_roce_dev_shutdown(struct qede_dev *edev)
+static void qede_rdma_dev_shutdown(struct qede_dev *edev)
{
- if (!qede_roce_supported(edev))
+ if (!qede_rdma_supported(edev))
return;
mutex_lock(&qedr_dev_list_lock);
@@ -168,7 +168,7 @@ static void qede_roce_dev_shutdown(struct qede_dev *edev)
mutex_unlock(&qedr_dev_list_lock);
}
-int qede_roce_register_driver(struct qedr_driver *drv)
+int qede_rdma_register_driver(struct qedr_driver *drv)
{
struct qede_dev *edev;
u8 qedr_counter = 0;
@@ -184,52 +184,52 @@ int qede_roce_register_driver(struct qedr_driver *drv)
struct net_device *ndev;
qedr_counter++;
- _qede_roce_dev_add(edev);
+ _qede_rdma_dev_add(edev);
ndev = edev->ndev;
if (netif_running(ndev) && netif_oper_up(ndev))
- _qede_roce_dev_open(edev);
+ _qede_rdma_dev_open(edev);
}
mutex_unlock(&qedr_dev_list_lock);
- pr_notice("qedr: discovered and registered %d RoCE funcs\n",
+ pr_notice("qedr: discovered and registered %d RDMA funcs\n",
qedr_counter);
return 0;
}
-EXPORT_SYMBOL(qede_roce_register_driver);
+EXPORT_SYMBOL(qede_rdma_register_driver);
-void qede_roce_unregister_driver(struct qedr_driver *drv)
+void qede_rdma_unregister_driver(struct qedr_driver *drv)
{
struct qede_dev *edev;
mutex_lock(&qedr_dev_list_lock);
list_for_each_entry(edev, &qedr_dev_list, rdma_info.entry) {
if (edev->rdma_info.qedr_dev)
- _qede_roce_dev_remove(edev);
+ _qede_rdma_dev_remove(edev);
}
qedr_drv = NULL;
mutex_unlock(&qedr_dev_list_lock);
}
-EXPORT_SYMBOL(qede_roce_unregister_driver);
+EXPORT_SYMBOL(qede_rdma_unregister_driver);
-static void qede_roce_changeaddr(struct qede_dev *edev)
+static void qede_rdma_changeaddr(struct qede_dev *edev)
{
- if (!qede_roce_supported(edev))
+ if (!qede_rdma_supported(edev))
return;
if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify)
qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_CHANGE_ADDR);
}
-struct qede_roce_event_work *qede_roce_get_free_event_node(struct qede_dev
- *edev)
+static struct qede_rdma_event_work *
+qede_rdma_get_free_event_node(struct qede_dev *edev)
{
- struct qede_roce_event_work *event_node = NULL;
+ struct qede_rdma_event_work *event_node = NULL;
struct list_head *list_node = NULL;
bool found = false;
- list_for_each(list_node, &edev->rdma_info.roce_event_list) {
- event_node = list_entry(list_node, struct qede_roce_event_work,
+ list_for_each(list_node, &edev->rdma_info.rdma_event_list) {
+ event_node = list_entry(list_node, struct qede_rdma_event_work,
list);
if (!work_pending(&event_node->work)) {
found = true;
@@ -241,74 +241,74 @@ struct qede_roce_event_work *qede_roce_get_free_event_node(struct qede_dev
event_node = kzalloc(sizeof(*event_node), GFP_KERNEL);
if (!event_node) {
DP_NOTICE(edev,
- "qedr: Could not allocate memory for roce work\n");
+ "qedr: Could not allocate memory for rdma work\n");
return NULL;
}
list_add_tail(&event_node->list,
- &edev->rdma_info.roce_event_list);
+ &edev->rdma_info.rdma_event_list);
}
return event_node;
}
-static void qede_roce_handle_event(struct work_struct *work)
+static void qede_rdma_handle_event(struct work_struct *work)
{
- struct qede_roce_event_work *event_node;
- enum qede_roce_event event;
+ struct qede_rdma_event_work *event_node;
+ enum qede_rdma_event event;
struct qede_dev *edev;
- event_node = container_of(work, struct qede_roce_event_work, work);
+ event_node = container_of(work, struct qede_rdma_event_work, work);
event = event_node->event;
edev = event_node->ptr;
switch (event) {
case QEDE_UP:
- qede_roce_dev_open(edev);
+ qede_rdma_dev_open(edev);
break;
case QEDE_DOWN:
- qede_roce_dev_close(edev);
+ qede_rdma_dev_close(edev);
break;
case QEDE_CLOSE:
- qede_roce_dev_shutdown(edev);
+ qede_rdma_dev_shutdown(edev);
break;
case QEDE_CHANGE_ADDR:
- qede_roce_changeaddr(edev);
+ qede_rdma_changeaddr(edev);
break;
default:
- DP_NOTICE(edev, "Invalid roce event %d", event);
+ DP_NOTICE(edev, "Invalid rdma event %d", event);
}
}
-static void qede_roce_add_event(struct qede_dev *edev,
- enum qede_roce_event event)
+static void qede_rdma_add_event(struct qede_dev *edev,
+ enum qede_rdma_event event)
{
- struct qede_roce_event_work *event_node;
+ struct qede_rdma_event_work *event_node;
if (!edev->rdma_info.qedr_dev)
return;
- event_node = qede_roce_get_free_event_node(edev);
+ event_node = qede_rdma_get_free_event_node(edev);
if (!event_node)
return;
event_node->event = event;
event_node->ptr = edev;
- INIT_WORK(&event_node->work, qede_roce_handle_event);
- queue_work(edev->rdma_info.roce_wq, &event_node->work);
+ INIT_WORK(&event_node->work, qede_rdma_handle_event);
+ queue_work(edev->rdma_info.rdma_wq, &event_node->work);
}
-void qede_roce_dev_event_open(struct qede_dev *edev)
+void qede_rdma_dev_event_open(struct qede_dev *edev)
{
- qede_roce_add_event(edev, QEDE_UP);
+ qede_rdma_add_event(edev, QEDE_UP);
}
-void qede_roce_dev_event_close(struct qede_dev *edev)
+void qede_rdma_dev_event_close(struct qede_dev *edev)
{
- qede_roce_add_event(edev, QEDE_DOWN);
+ qede_rdma_add_event(edev, QEDE_DOWN);
}
-void qede_roce_event_changeaddr(struct qede_dev *edev)
+void qede_rdma_event_changeaddr(struct qede_dev *edev)
{
- qede_roce_add_event(edev, QEDE_CHANGE_ADDR);
+ qede_rdma_add_event(edev, QEDE_CHANGE_ADDR);
}
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index 1188d420fe53..9feec7009443 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -1577,7 +1577,7 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev,
rx_ring->rx_dropped++;
goto err_out;
}
- memcpy(skb_put(skb, hlen), addr, hlen);
+ skb_put_data(skb, addr, hlen);
netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
"%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
length);
@@ -1654,7 +1654,7 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
dma_unmap_len(sbq_desc, maplen),
PCI_DMA_FROMDEVICE);
- memcpy(skb_put(new_skb, length), skb->data, length);
+ skb_put_data(new_skb, skb->data, length);
pci_dma_sync_single_for_device(qdev->pdev,
dma_unmap_addr(sbq_desc, mapaddr),
@@ -1817,8 +1817,7 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
dma_unmap_len
(sbq_desc, maplen),
PCI_DMA_FROMDEVICE);
- memcpy(skb_put(skb, length),
- sbq_desc->p.skb->data, length);
+ skb_put_data(skb, sbq_desc->p.skb->data, length);
pci_dma_sync_single_for_device(qdev->pdev,
dma_unmap_addr
(sbq_desc,
diff --git a/drivers/net/ethernet/qualcomm/Kconfig b/drivers/net/ethernet/qualcomm/Kconfig
index d7720bf92d49..877675a27b9f 100644
--- a/drivers/net/ethernet/qualcomm/Kconfig
+++ b/drivers/net/ethernet/qualcomm/Kconfig
@@ -16,7 +16,13 @@ config NET_VENDOR_QUALCOMM
if NET_VENDOR_QUALCOMM
config QCA7000
- tristate "Qualcomm Atheros QCA7000 support"
+ tristate
+ help
+ This enables support for the Qualcomm Atheros QCA7000.
+
+config QCA7000_SPI
+ tristate "Qualcomm Atheros QCA7000 SPI support"
+ select QCA7000
depends on SPI_MASTER && OF
---help---
This SPI protocol driver supports the Qualcomm Atheros QCA7000.
@@ -24,6 +30,22 @@ config QCA7000
To compile this driver as a module, choose M here. The module
will be called qcaspi.
+config QCA7000_UART
+ tristate "Qualcomm Atheros QCA7000 UART support"
+ select QCA7000
+ depends on SERIAL_DEV_BUS && OF
+ ---help---
+ This UART protocol driver supports the Qualcomm Atheros QCA7000.
+
+ Currently the driver assumes these device UART settings:
+ Data bits: 8
+ Parity: None
+ Stop bits: 1
+ Flow control: None
+
+ To compile this driver as a module, choose M here. The module
+ will be called qcauart.
+
config QCOM_EMAC
tristate "Qualcomm Technologies, Inc. EMAC Gigabit Ethernet support"
depends on HAS_DMA && HAS_IOMEM
diff --git a/drivers/net/ethernet/qualcomm/Makefile b/drivers/net/ethernet/qualcomm/Makefile
index aacb0a585c68..92fa7c4da90a 100644
--- a/drivers/net/ethernet/qualcomm/Makefile
+++ b/drivers/net/ethernet/qualcomm/Makefile
@@ -2,7 +2,10 @@
# Makefile for the Qualcomm network device drivers.
#
-obj-$(CONFIG_QCA7000) += qcaspi.o
-qcaspi-objs := qca_spi.o qca_framing.o qca_7k.o qca_debug.o
+obj-$(CONFIG_QCA7000) += qca_7k_common.o
+obj-$(CONFIG_QCA7000_SPI) += qcaspi.o
+qcaspi-objs := qca_7k.o qca_debug.o qca_spi.o
+obj-$(CONFIG_QCA7000_UART) += qcauart.o
+qcauart-objs := qca_uart.o
obj-y += emac/
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c
index 18c184ee1f3c..29ba37a08372 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c
@@ -297,6 +297,14 @@ static const struct of_device_id emac_sgmii_dt_match[] = {
{}
};
+/* Dummy function for systems without an internal PHY. This avoids having
+ * to check for NULL pointers before calling the functions.
+ */
+static int emac_sgmii_dummy(struct emac_adapter *adpt)
+{
+ return 0;
+}
+
int emac_sgmii_config(struct platform_device *pdev, struct emac_adapter *adpt)
{
struct platform_device *sgmii_pdev = NULL;
@@ -311,8 +319,19 @@ int emac_sgmii_config(struct platform_device *pdev, struct emac_adapter *adpt)
emac_sgmii_acpi_match);
if (!dev) {
- dev_err(&pdev->dev, "cannot find internal phy node\n");
- return -ENODEV;
+ dev_warn(&pdev->dev, "cannot find internal phy node\n");
+ /* There is typically no internal PHY on emulation
+ * systems, so if we can't find the node, assume
+ * we are on an emulation system and stub-out
+ * support for the internal PHY. These systems only
+ * use ACPI.
+ */
+ phy->open = emac_sgmii_dummy;
+ phy->close = emac_sgmii_dummy;
+ phy->link_up = emac_sgmii_dummy;
+ phy->link_down = emac_sgmii_dummy;
+
+ return 0;
}
sgmii_pdev = to_platform_device(dev);
diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c
index 98a326faea29..746d94e28470 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac.c
@@ -683,8 +683,6 @@ static int emac_probe(struct platform_device *pdev)
goto err_undo_mdiobus;
}
- emac_mac_reset(adpt);
-
/* set hw features */
netdev->features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM |
NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_CTAG_RX |
@@ -762,6 +760,19 @@ static int emac_remove(struct platform_device *pdev)
return 0;
}
+static void emac_shutdown(struct platform_device *pdev)
+{
+ struct net_device *netdev = dev_get_drvdata(&pdev->dev);
+ struct emac_adapter *adpt = netdev_priv(netdev);
+ struct emac_sgmii *sgmii = &adpt->phy;
+
+ /* Closing the SGMII turns off its interrupts */
+ sgmii->close(adpt);
+
+ /* Resetting the MAC turns off all DMA and its interrupts */
+ emac_mac_reset(adpt);
+}
+
static struct platform_driver emac_platform_driver = {
.probe = emac_probe,
.remove = emac_remove,
@@ -770,6 +781,7 @@ static struct platform_driver emac_platform_driver = {
.of_match_table = emac_dt_match,
.acpi_match_table = ACPI_PTR(emac_acpi_match),
},
+ .shutdown = emac_shutdown,
};
module_platform_driver(emac_platform_driver);
diff --git a/drivers/net/ethernet/qualcomm/qca_7k.c b/drivers/net/ethernet/qualcomm/qca_7k.c
index f0066fbb44a6..ffe7a16bdfc8 100644
--- a/drivers/net/ethernet/qualcomm/qca_7k.c
+++ b/drivers/net/ethernet/qualcomm/qca_7k.c
@@ -23,11 +23,9 @@
* kernel-based SPI device.
*/
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
#include <linux/spi/spi.h>
-#include <linux/version.h>
#include "qca_7k.h"
@@ -123,27 +121,3 @@ qcaspi_write_register(struct qcaspi *qca, u16 reg, u16 value)
return ret;
}
-
-int
-qcaspi_tx_cmd(struct qcaspi *qca, u16 cmd)
-{
- __be16 tx_data;
- struct spi_message *msg = &qca->spi_msg1;
- struct spi_transfer *transfer = &qca->spi_xfer1;
- int ret;
-
- tx_data = cpu_to_be16(cmd);
- transfer->len = sizeof(tx_data);
- transfer->tx_buf = &tx_data;
- transfer->rx_buf = NULL;
-
- ret = spi_sync(qca->spi_dev, msg);
-
- if (!ret)
- ret = msg->status;
-
- if (ret)
- qcaspi_spi_error(qca);
-
- return ret;
-}
diff --git a/drivers/net/ethernet/qualcomm/qca_7k.h b/drivers/net/ethernet/qualcomm/qca_7k.h
index 1cad851ee507..27124c2bb77a 100644
--- a/drivers/net/ethernet/qualcomm/qca_7k.h
+++ b/drivers/net/ethernet/qualcomm/qca_7k.h
@@ -54,19 +54,18 @@
#define SPI_REG_ACTION_CTRL 0x1B00
/* SPI_CONFIG register definition; */
-#define QCASPI_SLAVE_RESET_BIT (1 << 6)
+#define QCASPI_SLAVE_RESET_BIT BIT(6)
/* INTR_CAUSE/ENABLE register definition. */
-#define SPI_INT_WRBUF_BELOW_WM (1 << 10)
-#define SPI_INT_CPU_ON (1 << 6)
-#define SPI_INT_ADDR_ERR (1 << 3)
-#define SPI_INT_WRBUF_ERR (1 << 2)
-#define SPI_INT_RDBUF_ERR (1 << 1)
-#define SPI_INT_PKT_AVLBL (1 << 0)
+#define SPI_INT_WRBUF_BELOW_WM BIT(10)
+#define SPI_INT_CPU_ON BIT(6)
+#define SPI_INT_ADDR_ERR BIT(3)
+#define SPI_INT_WRBUF_ERR BIT(2)
+#define SPI_INT_RDBUF_ERR BIT(1)
+#define SPI_INT_PKT_AVLBL BIT(0)
void qcaspi_spi_error(struct qcaspi *qca);
int qcaspi_read_register(struct qcaspi *qca, u16 reg, u16 *result);
int qcaspi_write_register(struct qcaspi *qca, u16 reg, u16 value);
-int qcaspi_tx_cmd(struct qcaspi *qca, u16 cmd);
#endif /* _QCA_7K_H */
diff --git a/drivers/net/ethernet/qualcomm/qca_framing.c b/drivers/net/ethernet/qualcomm/qca_7k_common.c
index faa924c85e29..6b511f05df61 100644
--- a/drivers/net/ethernet/qualcomm/qca_framing.c
+++ b/drivers/net/ethernet/qualcomm/qca_7k_common.c
@@ -21,9 +21,11 @@
* by an Atheros frame while transmitted over a serial channel;
*/
+#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/module.h>
-#include "qca_framing.h"
+#include "qca_7k_common.h"
u16
qcafrm_create_header(u8 *buf, u16 length)
@@ -46,6 +48,7 @@ qcafrm_create_header(u8 *buf, u16 length)
return QCAFRM_HEADER_LEN;
}
+EXPORT_SYMBOL_GPL(qcafrm_create_header);
u16
qcafrm_create_footer(u8 *buf)
@@ -57,6 +60,7 @@ qcafrm_create_footer(u8 *buf)
buf[1] = 0x55;
return QCAFRM_FOOTER_LEN;
}
+EXPORT_SYMBOL_GPL(qcafrm_create_footer);
/* Gather received bytes and try to extract a full ethernet frame by
* following a simple state machine.
@@ -83,7 +87,7 @@ qcafrm_fsm_decode(struct qcafrm_handle *handle, u8 *buf, u16 buf_len, u8 recv_by
if (recv_byte != 0x00) {
/* first two bytes of length must be 0 */
- handle->state = QCAFRM_HW_LEN0;
+ handle->state = handle->init;
}
break;
case QCAFRM_HW_LEN2:
@@ -97,7 +101,7 @@ qcafrm_fsm_decode(struct qcafrm_handle *handle, u8 *buf, u16 buf_len, u8 recv_by
case QCAFRM_WAIT_AA4:
if (recv_byte != 0xAA) {
ret = QCAFRM_NOHEAD;
- handle->state = QCAFRM_HW_LEN0;
+ handle->state = handle->init;
} else {
handle->state--;
}
@@ -117,9 +121,9 @@ qcafrm_fsm_decode(struct qcafrm_handle *handle, u8 *buf, u16 buf_len, u8 recv_by
break;
case QCAFRM_WAIT_RSVD_BYTE2:
len = handle->offset;
- if (len > buf_len || len < QCAFRM_ETHMINLEN) {
+ if (len > buf_len || len < QCAFRM_MIN_LEN) {
ret = QCAFRM_INVLEN;
- handle->state = QCAFRM_HW_LEN0;
+ handle->state = handle->init;
} else {
handle->state = (enum qcafrm_state)(len + 1);
/* Remaining number of bytes. */
@@ -135,7 +139,7 @@ qcafrm_fsm_decode(struct qcafrm_handle *handle, u8 *buf, u16 buf_len, u8 recv_by
case QCAFRM_WAIT_551:
if (recv_byte != 0x55) {
ret = QCAFRM_NOTAIL;
- handle->state = QCAFRM_HW_LEN0;
+ handle->state = handle->init;
} else {
handle->state = QCAFRM_WAIT_552;
}
@@ -143,14 +147,20 @@ qcafrm_fsm_decode(struct qcafrm_handle *handle, u8 *buf, u16 buf_len, u8 recv_by
case QCAFRM_WAIT_552:
if (recv_byte != 0x55) {
ret = QCAFRM_NOTAIL;
- handle->state = QCAFRM_HW_LEN0;
+ handle->state = handle->init;
} else {
ret = handle->offset;
/* Frame is fully received. */
- handle->state = QCAFRM_HW_LEN0;
+ handle->state = handle->init;
}
break;
}
return ret;
}
+EXPORT_SYMBOL_GPL(qcafrm_fsm_decode);
+
+MODULE_DESCRIPTION("Qualcomm Atheros QCA7000 common");
+MODULE_AUTHOR("Qualcomm Atheros Communications");
+MODULE_AUTHOR("Stefan Wahren <stefan.wahren@i2se.com>");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/ethernet/qualcomm/qca_framing.h b/drivers/net/ethernet/qualcomm/qca_7k_common.h
index d5e795dcdf47..928554f11e35 100644
--- a/drivers/net/ethernet/qualcomm/qca_framing.h
+++ b/drivers/net/ethernet/qualcomm/qca_7k_common.h
@@ -44,12 +44,12 @@
#define QCAFRM_INVFRAME (QCAFRM_ERR_BASE - 4)
/* Min/Max Ethernet MTU: 46/1500 */
-#define QCAFRM_ETHMINMTU (ETH_ZLEN - ETH_HLEN)
-#define QCAFRM_ETHMAXMTU ETH_DATA_LEN
+#define QCAFRM_MIN_MTU (ETH_ZLEN - ETH_HLEN)
+#define QCAFRM_MAX_MTU ETH_DATA_LEN
/* Min/Max frame lengths */
-#define QCAFRM_ETHMINLEN (QCAFRM_ETHMINMTU + ETH_HLEN)
-#define QCAFRM_ETHMAXLEN (QCAFRM_ETHMAXMTU + VLAN_ETH_HLEN)
+#define QCAFRM_MIN_LEN (QCAFRM_MIN_MTU + ETH_HLEN)
+#define QCAFRM_MAX_LEN (QCAFRM_MAX_MTU + VLAN_ETH_HLEN)
/* QCA7K header len */
#define QCAFRM_HEADER_LEN 8
@@ -61,6 +61,7 @@
#define QCAFRM_ERR_BASE -1000
enum qcafrm_state {
+ /* HW length is only available on SPI */
QCAFRM_HW_LEN0 = 0x8000,
QCAFRM_HW_LEN1 = QCAFRM_HW_LEN0 - 1,
QCAFRM_HW_LEN2 = QCAFRM_HW_LEN1 - 1,
@@ -101,9 +102,11 @@ enum qcafrm_state {
struct qcafrm_handle {
/* Current decoding state */
enum qcafrm_state state;
+ /* Initial state depends on connection type */
+ enum qcafrm_state init;
/* Offset in buffer (borrowed for length too) */
- s16 offset;
+ u16 offset;
/* Frame length as kept by this module */
u16 len;
@@ -113,9 +116,16 @@ u16 qcafrm_create_header(u8 *buf, u16 len);
u16 qcafrm_create_footer(u8 *buf);
-static inline void qcafrm_fsm_init(struct qcafrm_handle *handle)
+static inline void qcafrm_fsm_init_spi(struct qcafrm_handle *handle)
{
- handle->state = QCAFRM_HW_LEN0;
+ handle->init = QCAFRM_HW_LEN0;
+ handle->state = handle->init;
+}
+
+static inline void qcafrm_fsm_init_uart(struct qcafrm_handle *handle)
+{
+ handle->init = QCAFRM_WAIT_AA1;
+ handle->state = handle->init;
}
/* Gather received bytes and try to extract a full Ethernet frame
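The decoder is now bus-agnostic: the handle records a per-connection initial state, so SPI resets to QCAFRM_HW_LEN0 (the hardware length bytes only exist on SPI) while UART resets to QCAFRM_WAIT_AA1, the first 0xAA sync byte. A hedged usage sketch based on the calls visible above (buffer sizing and byte delivery are the caller's responsibility):

	struct qcafrm_handle handle;
	u8 buf[QCAFRM_MAX_LEN];
	u8 byte;	/* next byte received from the bus */
	s32 ret;

	qcafrm_fsm_init_uart(&handle);	/* SPI callers use qcafrm_fsm_init_spi() */

	/* feed received bytes into the decoder one at a time */
	ret = qcafrm_fsm_decode(&handle, buf, sizeof(buf), byte);
	if (ret >= 0) {
		/* buf holds a complete Ethernet frame of ret bytes */
	} else if (ret != QCAFRM_GATHER && ret != QCAFRM_NOHEAD) {
		/* QCAFRM_NOTAIL / QCAFRM_INVLEN: count an RX error */
	}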
diff --git a/drivers/net/ethernet/qualcomm/qca_debug.c b/drivers/net/ethernet/qualcomm/qca_debug.c
index d145df98feff..92b6be9c4429 100644
--- a/drivers/net/ethernet/qualcomm/qca_debug.c
+++ b/drivers/net/ethernet/qualcomm/qca_debug.c
@@ -275,6 +275,7 @@ qcaspi_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ring)
static int
qcaspi_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ring)
{
+ const struct net_device_ops *ops = dev->netdev_ops;
struct qcaspi *qca = netdev_priv(dev);
if ((ring->rx_pending) ||
@@ -283,13 +284,13 @@ qcaspi_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ring)
return -EINVAL;
if (netif_running(dev))
- qcaspi_netdev_close(dev);
+ ops->ndo_stop(dev);
qca->txr.count = max_t(u32, ring->tx_pending, TX_RING_MIN_LEN);
qca->txr.count = min_t(u16, qca->txr.count, TX_RING_MAX_LEN);
if (netif_running(dev))
- qcaspi_netdev_open(dev);
+ ops->ndo_open(dev);
return 0;
}
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
index 24ca7df15d07..9c236298fe21 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.c
+++ b/drivers/net/ethernet/qualcomm/qca_spi.c
@@ -43,8 +43,8 @@
#include <linux/types.h>
#include "qca_7k.h"
+#include "qca_7k_common.h"
#include "qca_debug.h"
-#include "qca_framing.h"
#include "qca_spi.h"
#define MAX_DMA_BURST_LEN 5000
@@ -69,7 +69,6 @@ static int qcaspi_pluggable = QCASPI_PLUGGABLE_MIN;
module_param(qcaspi_pluggable, int, 0);
MODULE_PARM_DESC(qcaspi_pluggable, "Pluggable SPI connection (yes/no).");
-#define QCASPI_MTU QCAFRM_ETHMAXMTU
#define QCASPI_TX_TIMEOUT (1 * HZ)
#define QCASPI_QCA7K_REBOOT_TIME_MS 1000
@@ -193,6 +192,30 @@ qcaspi_read_legacy(struct qcaspi *qca, u8 *dst, u32 len)
}
static int
+qcaspi_tx_cmd(struct qcaspi *qca, u16 cmd)
+{
+ __be16 tx_data;
+ struct spi_message *msg = &qca->spi_msg1;
+ struct spi_transfer *transfer = &qca->spi_xfer1;
+ int ret;
+
+ tx_data = cpu_to_be16(cmd);
+ transfer->len = sizeof(tx_data);
+ transfer->tx_buf = &tx_data;
+ transfer->rx_buf = NULL;
+
+ ret = spi_sync(qca->spi_dev, msg);
+
+ if (!ret)
+ ret = msg->status;
+
+ if (ret)
+ qcaspi_spi_error(qca);
+
+ return ret;
+}
+
+static int
qcaspi_tx_frame(struct qcaspi *qca, struct sk_buff *skb)
{
u32 count;
@@ -403,7 +426,7 @@ qcaspi_tx_ring_has_space(struct tx_ring *txr)
if (txr->skb[txr->tail])
return 0;
- return (txr->size + QCAFRM_ETHMAXLEN < QCASPI_HW_BUF_LEN) ? 1 : 0;
+ return (txr->size + QCAFRM_MAX_LEN < QCASPI_HW_BUF_LEN) ? 1 : 0;
}
/* Flush the tx ring. This function is only safe to
@@ -603,7 +626,7 @@ qcaspi_intr_handler(int irq, void *data)
return IRQ_HANDLED;
}
-int
+static int
qcaspi_netdev_open(struct net_device *dev)
{
struct qcaspi *qca = netdev_priv(dev);
@@ -615,7 +638,7 @@ qcaspi_netdev_open(struct net_device *dev)
qca->intr_req = 1;
qca->intr_svc = 0;
qca->sync = QCASPI_SYNC_UNKNOWN;
- qcafrm_fsm_init(&qca->frm_handle);
+ qcafrm_fsm_init_spi(&qca->frm_handle);
qca->spi_thread = kthread_run((void *)qcaspi_spi_thread,
qca, "%s", dev->name);
@@ -640,7 +663,7 @@ qcaspi_netdev_open(struct net_device *dev)
return 0;
}
-int
+static int
qcaspi_netdev_close(struct net_device *dev)
{
struct qcaspi *qca = netdev_priv(dev);
@@ -667,8 +690,8 @@ qcaspi_netdev_xmit(struct sk_buff *skb, struct net_device *dev)
struct sk_buff *tskb;
u8 pad_len = 0;
- if (skb->len < QCAFRM_ETHMINLEN)
- pad_len = QCAFRM_ETHMINLEN - skb->len;
+ if (skb->len < QCAFRM_MIN_LEN)
+ pad_len = QCAFRM_MIN_LEN - skb->len;
if (qca->txr.skb[qca->txr.tail]) {
netdev_warn(qca->net_dev, "queue was unexpectedly full!\n");
@@ -696,8 +719,7 @@ qcaspi_netdev_xmit(struct sk_buff *skb, struct net_device *dev)
qcafrm_create_header(ptmp, frame_len);
if (pad_len) {
- ptmp = skb_put(skb, pad_len);
- memset(ptmp, 0, pad_len);
+ ptmp = skb_put_zero(skb, pad_len);
}
ptmp = skb_put(skb, QCAFRM_FOOTER_LEN);
@@ -746,7 +768,7 @@ qcaspi_netdev_init(struct net_device *dev)
{
struct qcaspi *qca = netdev_priv(dev);
- dev->mtu = QCASPI_MTU;
+ dev->mtu = QCAFRM_MAX_MTU;
dev->type = ARPHRD_ETHER;
qca->clkspeed = qcaspi_clkspeed;
qca->burst_len = qcaspi_burst_len;
@@ -805,8 +827,8 @@ qcaspi_netdev_setup(struct net_device *dev)
dev->tx_queue_len = 100;
/* MTU range: 46 - 1500 */
- dev->min_mtu = QCAFRM_ETHMINMTU;
- dev->max_mtu = QCAFRM_ETHMAXMTU;
+ dev->min_mtu = QCAFRM_MIN_MTU;
+ dev->max_mtu = QCAFRM_MAX_MTU;
qca = netdev_priv(dev);
memset(qca, 0, sizeof(struct qcaspi));
@@ -894,6 +916,7 @@ qca_spi_probe(struct spi_device *spi)
return -ENOMEM;
qcaspi_netdev_setup(qcaspi_devs);
+ SET_NETDEV_DEV(qcaspi_devs, &spi->dev);
qca = netdev_priv(qcaspi_devs);
if (!qca) {
@@ -975,7 +998,7 @@ static struct spi_driver qca_spi_driver = {
};
module_spi_driver(qca_spi_driver);
-MODULE_DESCRIPTION("Qualcomm Atheros SPI Driver");
+MODULE_DESCRIPTION("Qualcomm Atheros QCA7000 SPI Driver");
MODULE_AUTHOR("Qualcomm Atheros Communications");
MODULE_AUTHOR("Stefan Wahren <stefan.wahren@i2se.com>");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.h b/drivers/net/ethernet/qualcomm/qca_spi.h
index 6e31a0e744a4..fc4beb1b32d1 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.h
+++ b/drivers/net/ethernet/qualcomm/qca_spi.h
@@ -32,7 +32,7 @@
#include <linux/spi/spi.h>
#include <linux/types.h>
-#include "qca_framing.h"
+#include "qca_7k_common.h"
#define QCASPI_DRV_VERSION "0.2.7-i"
#define QCASPI_DRV_NAME "qcaspi"
@@ -108,7 +108,4 @@ struct qcaspi {
u16 burst_len;
};
-int qcaspi_netdev_open(struct net_device *dev);
-int qcaspi_netdev_close(struct net_device *dev);
-
#endif /* _QCA_SPI_H */
diff --git a/drivers/net/ethernet/qualcomm/qca_uart.c b/drivers/net/ethernet/qualcomm/qca_uart.c
new file mode 100644
index 000000000000..db6068cd7a1f
--- /dev/null
+++ b/drivers/net/ethernet/qualcomm/qca_uart.c
@@ -0,0 +1,423 @@
+/*
+ * Copyright (c) 2011, 2012, Qualcomm Atheros Communications Inc.
+ * Copyright (c) 2017, I2SE GmbH
+ *
+ * Permission to use, copy, modify, and/or distribute this software
+ * for any purpose with or without fee is hereby granted, provided
+ * that the above copyright notice and this permission notice appear
+ * in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+ * THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+ * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+ * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/* This module implements the Qualcomm Atheros UART protocol for
+ * kernel-based UART device; it is essentially an Ethernet-to-UART
+ * serial converter;
+ */
+
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/etherdevice.h>
+#include <linux/if_arp.h>
+#include <linux/if_ether.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_net.h>
+#include <linux/sched.h>
+#include <linux/serdev.h>
+#include <linux/skbuff.h>
+#include <linux/types.h>
+
+#include "qca_7k_common.h"
+
+#define QCAUART_DRV_VERSION "0.1.0"
+#define QCAUART_DRV_NAME "qcauart"
+#define QCAUART_TX_TIMEOUT (1 * HZ)
+
+struct qcauart {
+ struct net_device *net_dev;
+ spinlock_t lock; /* transmit lock */
+ struct work_struct tx_work; /* Flushes transmit buffer */
+
+ struct serdev_device *serdev;
+ struct qcafrm_handle frm_handle;
+ struct sk_buff *rx_skb;
+
+ unsigned char *tx_head; /* pointer to next XMIT byte */
+ int tx_left; /* bytes left in XMIT queue */
+ unsigned char *tx_buffer;
+};
+
+static int
+qca_tty_receive(struct serdev_device *serdev, const unsigned char *data,
+ size_t count)
+{
+ struct qcauart *qca = serdev_device_get_drvdata(serdev);
+ struct net_device *netdev = qca->net_dev;
+ struct net_device_stats *n_stats = &netdev->stats;
+ size_t i;
+
+ if (!qca->rx_skb) {
+ qca->rx_skb = netdev_alloc_skb_ip_align(netdev,
+ netdev->mtu +
+ VLAN_ETH_HLEN);
+ if (!qca->rx_skb) {
+ n_stats->rx_errors++;
+ n_stats->rx_dropped++;
+ return 0;
+ }
+ }
+
+ for (i = 0; i < count; i++) {
+ s32 retcode;
+
+ retcode = qcafrm_fsm_decode(&qca->frm_handle,
+ qca->rx_skb->data,
+ skb_tailroom(qca->rx_skb),
+ data[i]);
+
+ switch (retcode) {
+ case QCAFRM_GATHER:
+ case QCAFRM_NOHEAD:
+ break;
+ case QCAFRM_NOTAIL:
+ netdev_dbg(netdev, "recv: no RX tail\n");
+ n_stats->rx_errors++;
+ n_stats->rx_dropped++;
+ break;
+ case QCAFRM_INVLEN:
+ netdev_dbg(netdev, "recv: invalid RX length\n");
+ n_stats->rx_errors++;
+ n_stats->rx_dropped++;
+ break;
+ default:
+ n_stats->rx_packets++;
+ n_stats->rx_bytes += retcode;
+ skb_put(qca->rx_skb, retcode);
+ qca->rx_skb->protocol = eth_type_trans(
+ qca->rx_skb, qca->rx_skb->dev);
+ qca->rx_skb->ip_summed = CHECKSUM_UNNECESSARY;
+ netif_rx_ni(qca->rx_skb);
+ qca->rx_skb = netdev_alloc_skb_ip_align(netdev,
+ netdev->mtu +
+ VLAN_ETH_HLEN);
+ if (!qca->rx_skb) {
+ netdev_dbg(netdev, "recv: out of RX resources\n");
+ n_stats->rx_errors++;
+ return i;
+ }
+ }
+ }
+
+ return i;
+}
+
+/* Write out any remaining transmit buffer. Scheduled when tty is writable */
+static void qcauart_transmit(struct work_struct *work)
+{
+ struct qcauart *qca = container_of(work, struct qcauart, tx_work);
+ struct net_device_stats *n_stats = &qca->net_dev->stats;
+ int written;
+
+ spin_lock_bh(&qca->lock);
+
+ /* First make sure we're connected. */
+ if (!netif_running(qca->net_dev)) {
+ spin_unlock_bh(&qca->lock);
+ return;
+ }
+
+ if (qca->tx_left <= 0) {
+ /* Now the serial buffer is almost free & we can start
+ * transmission of another packet.
+ */
+ n_stats->tx_packets++;
+ spin_unlock_bh(&qca->lock);
+ netif_wake_queue(qca->net_dev);
+ return;
+ }
+
+ written = serdev_device_write_buf(qca->serdev, qca->tx_head,
+ qca->tx_left);
+ if (written > 0) {
+ qca->tx_left -= written;
+ qca->tx_head += written;
+ }
+ spin_unlock_bh(&qca->lock);
+}
+
+/* Called by the driver when there's room for more data.
+ * Schedule the transmit.
+ */
+static void qca_tty_wakeup(struct serdev_device *serdev)
+{
+ struct qcauart *qca = serdev_device_get_drvdata(serdev);
+
+ schedule_work(&qca->tx_work);
+}
+
+static struct serdev_device_ops qca_serdev_ops = {
+ .receive_buf = qca_tty_receive,
+ .write_wakeup = qca_tty_wakeup,
+};
+
+static int qcauart_netdev_open(struct net_device *dev)
+{
+ struct qcauart *qca = netdev_priv(dev);
+
+ netif_start_queue(qca->net_dev);
+
+ return 0;
+}
+
+static int qcauart_netdev_close(struct net_device *dev)
+{
+ struct qcauart *qca = netdev_priv(dev);
+
+ netif_stop_queue(dev);
+ flush_work(&qca->tx_work);
+
+ spin_lock_bh(&qca->lock);
+ qca->tx_left = 0;
+ spin_unlock_bh(&qca->lock);
+
+ return 0;
+}
+
+static netdev_tx_t
+qcauart_netdev_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct net_device_stats *n_stats = &dev->stats;
+ struct qcauart *qca = netdev_priv(dev);
+ u8 pad_len = 0;
+ int written;
+ u8 *pos;
+
+ spin_lock(&qca->lock);
+
+ WARN_ON(qca->tx_left);
+
+ if (!netif_running(dev)) {
+ spin_unlock(&qca->lock);
+ netdev_warn(qca->net_dev, "xmit: iface is down\n");
+ goto out;
+ }
+
+ pos = qca->tx_buffer;
+
+ if (skb->len < QCAFRM_MIN_LEN)
+ pad_len = QCAFRM_MIN_LEN - skb->len;
+
+ pos += qcafrm_create_header(pos, skb->len + pad_len);
+
+ memcpy(pos, skb->data, skb->len);
+ pos += skb->len;
+
+ if (pad_len) {
+ memset(pos, 0, pad_len);
+ pos += pad_len;
+ }
+
+ pos += qcafrm_create_footer(pos);
+
+ netif_stop_queue(qca->net_dev);
+
+ written = serdev_device_write_buf(qca->serdev, qca->tx_buffer,
+ pos - qca->tx_buffer);
+ if (written > 0) {
+ qca->tx_left = (pos - qca->tx_buffer) - written;
+ qca->tx_head = qca->tx_buffer + written;
+ n_stats->tx_bytes += written;
+ }
+ spin_unlock(&qca->lock);
+
+ netif_trans_update(dev);
+out:
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+}
+
+static void qcauart_netdev_tx_timeout(struct net_device *dev)
+{
+ struct qcauart *qca = netdev_priv(dev);
+
+ netdev_info(qca->net_dev, "Transmit timeout at %ld, latency %ld\n",
+ jiffies, dev_trans_start(dev));
+ dev->stats.tx_errors++;
+ dev->stats.tx_dropped++;
+}
+
+static int qcauart_netdev_init(struct net_device *dev)
+{
+ struct qcauart *qca = netdev_priv(dev);
+ size_t len;
+
+ /* Finish setting up the device info. */
+ dev->mtu = QCAFRM_MAX_MTU;
+ dev->type = ARPHRD_ETHER;
+
+ len = QCAFRM_HEADER_LEN + QCAFRM_MAX_LEN + QCAFRM_FOOTER_LEN;
+ qca->tx_buffer = devm_kmalloc(&qca->serdev->dev, len, GFP_KERNEL);
+ if (!qca->tx_buffer)
+ return -ENOMEM;
+
+ qca->rx_skb = netdev_alloc_skb_ip_align(qca->net_dev,
+ qca->net_dev->mtu +
+ VLAN_ETH_HLEN);
+ if (!qca->rx_skb)
+ return -ENOBUFS;
+
+ return 0;
+}
+
+static void qcauart_netdev_uninit(struct net_device *dev)
+{
+ struct qcauart *qca = netdev_priv(dev);
+
+ if (qca->rx_skb)
+ dev_kfree_skb(qca->rx_skb);
+}
+
+static const struct net_device_ops qcauart_netdev_ops = {
+ .ndo_init = qcauart_netdev_init,
+ .ndo_uninit = qcauart_netdev_uninit,
+ .ndo_open = qcauart_netdev_open,
+ .ndo_stop = qcauart_netdev_close,
+ .ndo_start_xmit = qcauart_netdev_xmit,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_tx_timeout = qcauart_netdev_tx_timeout,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+static void qcauart_netdev_setup(struct net_device *dev)
+{
+ dev->netdev_ops = &qcauart_netdev_ops;
+ dev->watchdog_timeo = QCAUART_TX_TIMEOUT;
+ dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+ dev->tx_queue_len = 100;
+
+ /* MTU range: 46 - 1500 */
+ dev->min_mtu = QCAFRM_MIN_MTU;
+ dev->max_mtu = QCAFRM_MAX_MTU;
+}
+
+static const struct of_device_id qca_uart_of_match[] = {
+ {
+ .compatible = "qca,qca7000",
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, qca_uart_of_match);
+
+static int qca_uart_probe(struct serdev_device *serdev)
+{
+ struct net_device *qcauart_dev = alloc_etherdev(sizeof(struct qcauart));
+ struct qcauart *qca;
+ const char *mac;
+ u32 speed = 115200;
+ int ret;
+
+ if (!qcauart_dev)
+ return -ENOMEM;
+
+ qcauart_netdev_setup(qcauart_dev);
+ SET_NETDEV_DEV(qcauart_dev, &serdev->dev);
+
+ qca = netdev_priv(qcauart_dev);
+ if (!qca) {
+ pr_err("qca_uart: Failed to retrieve private structure\n");
+ ret = -ENOMEM;
+ goto free;
+ }
+ qca->net_dev = qcauart_dev;
+ qca->serdev = serdev;
+ qcafrm_fsm_init_uart(&qca->frm_handle);
+
+ spin_lock_init(&qca->lock);
+ INIT_WORK(&qca->tx_work, qcauart_transmit);
+
+ of_property_read_u32(serdev->dev.of_node, "current-speed", &speed);
+
+ mac = of_get_mac_address(serdev->dev.of_node);
+
+ if (mac)
+ ether_addr_copy(qca->net_dev->dev_addr, mac);
+
+ if (!is_valid_ether_addr(qca->net_dev->dev_addr)) {
+ eth_hw_addr_random(qca->net_dev);
+ dev_info(&serdev->dev, "Using random MAC address: %pM\n",
+ qca->net_dev->dev_addr);
+ }
+
+ netif_carrier_on(qca->net_dev);
+ serdev_device_set_drvdata(serdev, qca);
+ serdev_device_set_client_ops(serdev, &qca_serdev_ops);
+
+ ret = serdev_device_open(serdev);
+ if (ret) {
+ dev_err(&serdev->dev, "Unable to open device %s\n",
+ qcauart_dev->name);
+ goto free;
+ }
+
+ speed = serdev_device_set_baudrate(serdev, speed);
+ dev_info(&serdev->dev, "Using baudrate: %u\n", speed);
+
+ serdev_device_set_flow_control(serdev, false);
+
+ ret = register_netdev(qcauart_dev);
+ if (ret) {
+ dev_err(&serdev->dev, "Unable to register net device %s\n",
+ qcauart_dev->name);
+ serdev_device_close(serdev);
+ cancel_work_sync(&qca->tx_work);
+ goto free;
+ }
+
+ return 0;
+
+free:
+ free_netdev(qcauart_dev);
+ return ret;
+}
+
+static void qca_uart_remove(struct serdev_device *serdev)
+{
+ struct qcauart *qca = serdev_device_get_drvdata(serdev);
+
+ unregister_netdev(qca->net_dev);
+
+ /* Flush any pending characters in the driver. */
+ serdev_device_close(serdev);
+ cancel_work_sync(&qca->tx_work);
+
+ free_netdev(qca->net_dev);
+}
+
+static struct serdev_device_driver qca_uart_driver = {
+ .probe = qca_uart_probe,
+ .remove = qca_uart_remove,
+ .driver = {
+ .name = QCAUART_DRV_NAME,
+ .of_match_table = of_match_ptr(qca_uart_of_match),
+ },
+};
+
+module_serdev_device_driver(qca_uart_driver);
+
+MODULE_DESCRIPTION("Qualcomm Atheros QCA7000 UART Driver");
+MODULE_AUTHOR("Qualcomm Atheros Communications");
+MODULE_AUTHOR("Stefan Wahren <stefan.wahren@i2se.com>");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_VERSION(QCAUART_DRV_VERSION);
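For context, the new driver binds through serdev as a child node of a UART controller. An illustrative device-tree fragment, derived from the "qca,qca7000" compatible string and the "current-speed" lookup in the probe above (the &uart0 label and node name are hypothetical; a fixed MAC can also be supplied via the standard "local-mac-address" property picked up by of_get_mac_address()):

	&uart0 {
		status = "okay";

		ethernet {
			compatible = "qca,qca7000";
			current-speed = <115200>;
		};
	};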
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index 72233ab9474b..e7ab23e87de2 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -1410,14 +1410,13 @@ static int cp_get_link_ksettings(struct net_device *dev,
struct ethtool_link_ksettings *cmd)
{
struct cp_private *cp = netdev_priv(dev);
- int rc;
unsigned long flags;
spin_lock_irqsave(&cp->lock, flags);
- rc = mii_ethtool_get_link_ksettings(&cp->mii_if, cmd);
+ mii_ethtool_get_link_ksettings(&cp->mii_if, cmd);
spin_unlock_irqrestore(&cp->lock, flags);
- return rc;
+ return 0;
}
static int cp_set_link_ksettings(struct net_device *dev,
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 0a8f2817ea60..bd07a15d3b7c 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -2148,7 +2148,9 @@ static int rtl8169_get_link_ksettings_xmii(struct net_device *dev,
{
struct rtl8169_private *tp = netdev_priv(dev);
- return mii_ethtool_get_link_ksettings(&tp->mii, cmd);
+ mii_ethtool_get_link_ksettings(&tp->mii, cmd);
+
+ return 0;
}
static int rtl8169_get_link_ksettings(struct net_device *dev,
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 784782da3a85..5931e859876c 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -1076,16 +1076,16 @@ static int ravb_get_link_ksettings(struct net_device *ndev,
struct ethtool_link_ksettings *cmd)
{
struct ravb_private *priv = netdev_priv(ndev);
- int error = -ENODEV;
unsigned long flags;
- if (ndev->phydev) {
- spin_lock_irqsave(&priv->lock, flags);
- error = phy_ethtool_ksettings_get(ndev->phydev, cmd);
- spin_unlock_irqrestore(&priv->lock, flags);
- }
+ if (!ndev->phydev)
+ return -ENODEV;
- return error;
+ spin_lock_irqsave(&priv->lock, flags);
+ phy_ethtool_ksettings_get(ndev->phydev, cmd);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return 0;
}
static int ravb_set_link_ksettings(struct net_device *ndev,
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 2d686ccf971b..d2dc0a8ef305 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -1915,16 +1915,15 @@ static int sh_eth_get_link_ksettings(struct net_device *ndev,
{
struct sh_eth_private *mdp = netdev_priv(ndev);
unsigned long flags;
- int ret;
if (!ndev->phydev)
return -ENODEV;
spin_lock_irqsave(&mdp->lock, flags);
- ret = phy_ethtool_ksettings_get(ndev->phydev, cmd);
+ phy_ethtool_ksettings_get(ndev->phydev, cmd);
spin_unlock_irqrestore(&mdp->lock, flags);
- return ret;
+ return 0;
}
static int sh_eth_set_link_ksettings(struct net_device *ndev,
@@ -2558,6 +2557,17 @@ static int sh_eth_do_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
return phy_mii_ioctl(phydev, rq, cmd);
}
+static int sh_eth_change_mtu(struct net_device *ndev, int new_mtu)
+{
+ if (netif_running(ndev))
+ return -EBUSY;
+
+ ndev->mtu = new_mtu;
+ netdev_update_features(ndev);
+
+ return 0;
+}
+
/* For TSU_POSTn. Please refer to the manual about this (strange) bitfields */
static void *sh_eth_tsu_get_post_reg_offset(struct sh_eth_private *mdp,
int entry)
@@ -3029,6 +3039,7 @@ static const struct net_device_ops sh_eth_netdev_ops = {
.ndo_set_rx_mode = sh_eth_set_rx_mode,
.ndo_tx_timeout = sh_eth_tx_timeout,
.ndo_do_ioctl = sh_eth_do_ioctl,
+ .ndo_change_mtu = sh_eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
};
@@ -3043,6 +3054,7 @@ static const struct net_device_ops sh_eth_netdev_ops_tsu = {
.ndo_vlan_rx_kill_vid = sh_eth_vlan_rx_kill_vid,
.ndo_tx_timeout = sh_eth_tx_timeout,
.ndo_do_ioctl = sh_eth_do_ioctl,
+ .ndo_change_mtu = sh_eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
};
@@ -3171,6 +3183,13 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
}
sh_eth_set_default_cpu_data(mdp->cd);
+ /* User's manual states max MTU should be 2048 but due to the
+ * alignment calculations in sh_eth_ring_init() the practical
+ * MTU is a bit less. Maybe this can be optimized some more.
+ */
+ ndev->max_mtu = 2000 - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN);
+ ndev->min_mtu = ETH_MIN_MTU;
+
/* set function */
if (mdp->cd->tsu)
ndev->netdev_ops = &sh_eth_netdev_ops_tsu;
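Concretely, with ETH_HLEN = 14, VLAN_HLEN = 4 and ETH_FCS_LEN = 4, the max_mtu expression above works out to 2000 - 22 = 1978 bytes, a bit under the 2048 the user's manual allows, as the in-code comment notes.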
diff --git a/drivers/net/ethernet/rocker/rocker.h b/drivers/net/ethernet/rocker/rocker.h
index ee9675db5bf9..748fb12260a6 100644
--- a/drivers/net/ethernet/rocker/rocker.h
+++ b/drivers/net/ethernet/rocker/rocker.h
@@ -105,32 +105,27 @@ struct rocker_world_ops {
int (*port_open)(struct rocker_port *rocker_port);
void (*port_stop)(struct rocker_port *rocker_port);
int (*port_attr_stp_state_set)(struct rocker_port *rocker_port,
- u8 state,
- struct switchdev_trans *trans);
+ u8 state);
int (*port_attr_bridge_flags_set)(struct rocker_port *rocker_port,
unsigned long brport_flags,
struct switchdev_trans *trans);
int (*port_attr_bridge_flags_get)(const struct rocker_port *rocker_port,
unsigned long *p_brport_flags);
+ int (*port_attr_bridge_flags_support_get)(const struct rocker_port *
+ rocker_port,
+ unsigned long *
+ p_brport_flags);
int (*port_attr_bridge_ageing_time_set)(struct rocker_port *rocker_port,
u32 ageing_time,
struct switchdev_trans *trans);
int (*port_obj_vlan_add)(struct rocker_port *rocker_port,
- const struct switchdev_obj_port_vlan *vlan,
- struct switchdev_trans *trans);
+ const struct switchdev_obj_port_vlan *vlan);
int (*port_obj_vlan_del)(struct rocker_port *rocker_port,
const struct switchdev_obj_port_vlan *vlan);
- int (*port_obj_vlan_dump)(const struct rocker_port *rocker_port,
- struct switchdev_obj_port_vlan *vlan,
- switchdev_obj_dump_cb_t *cb);
int (*port_obj_fdb_add)(struct rocker_port *rocker_port,
- const struct switchdev_obj_port_fdb *fdb,
- struct switchdev_trans *trans);
+ u16 vid, const unsigned char *addr);
int (*port_obj_fdb_del)(struct rocker_port *rocker_port,
- const struct switchdev_obj_port_fdb *fdb);
- int (*port_obj_fdb_dump)(const struct rocker_port *rocker_port,
- struct switchdev_obj_port_fdb *fdb,
- switchdev_obj_dump_cb_t *cb);
+ u16 vid, const unsigned char *addr);
int (*port_master_linked)(struct rocker_port *rocker_port,
struct net_device *master);
int (*port_master_unlinked)(struct rocker_port *rocker_port,
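
The switchdev_trans argument being dropped throughout this header comes from switchdev's two-phase transaction model: each attr/obj operation is called once in the prepare phase (validate and preallocate only, no hardware writes) and once in the commit phase (apply, must not fail). Since the world ops are no longer transaction-aware, the rocker_main.c wrappers below short-circuit the prepare phase themselves. A minimal sketch of that guard, where do_hardware_write() is a hypothetical stand-in:

	static int example_attr_set(struct rocker_port *rocker_port, u8 state,
				    struct switchdev_trans *trans)
	{
		/* prepare phase: nothing left to validate or preallocate */
		if (switchdev_trans_ph_prepare(trans))
			return 0;

		return do_hardware_write(rocker_port, state);	/* hypothetical */
	}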
diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
index bab13613b138..b1e5c07099fa 100644
--- a/drivers/net/ethernet/rocker/rocker_main.c
+++ b/drivers/net/ethernet/rocker/rocker_main.c
@@ -1557,7 +1557,11 @@ static int rocker_world_port_attr_stp_state_set(struct rocker_port *rocker_port,
if (!wops->port_attr_stp_state_set)
return -EOPNOTSUPP;
- return wops->port_attr_stp_state_set(rocker_port, state, trans);
+
+ if (switchdev_trans_ph_prepare(trans))
+ return 0;
+
+ return wops->port_attr_stp_state_set(rocker_port, state);
}
static int
@@ -1569,6 +1573,10 @@ rocker_world_port_attr_bridge_flags_set(struct rocker_port *rocker_port,
if (!wops->port_attr_bridge_flags_set)
return -EOPNOTSUPP;
+
+ if (switchdev_trans_ph_prepare(trans))
+ return 0;
+
return wops->port_attr_bridge_flags_set(rocker_port, brport_flags,
trans);
}
@@ -1585,6 +1593,20 @@ rocker_world_port_attr_bridge_flags_get(const struct rocker_port *rocker_port,
}
static int
+rocker_world_port_attr_bridge_flags_support_get(const struct rocker_port *rocker_port,
+ unsigned long *p_brport_flags_support)
+{
+ struct rocker_world_ops *wops = rocker_port->rocker->wops;
+
+ if (!wops->port_attr_bridge_flags_support_get)
+ return -EOPNOTSUPP;
+ return wops->port_attr_bridge_flags_support_get(rocker_port,
+ p_brport_flags_support);
+}
+
+static int
rocker_world_port_attr_bridge_ageing_time_set(struct rocker_port *rocker_port,
u32 ageing_time,
struct switchdev_trans *trans)
@@ -1594,6 +1616,10 @@ rocker_world_port_attr_bridge_ageing_time_set(struct rocker_port *rocker_port,
if (!wops->port_attr_bridge_ageing_time_set)
return -EOPNOTSUPP;
+
+ if (switchdev_trans_ph_prepare(trans))
+ return 0;
+
return wops->port_attr_bridge_ageing_time_set(rocker_port, ageing_time,
trans);
}
@@ -1607,7 +1633,11 @@ rocker_world_port_obj_vlan_add(struct rocker_port *rocker_port,
if (!wops->port_obj_vlan_add)
return -EOPNOTSUPP;
- return wops->port_obj_vlan_add(rocker_port, vlan, trans);
+
+ if (switchdev_trans_ph_prepare(trans))
+ return 0;
+
+ return wops->port_obj_vlan_add(rocker_port, vlan);
}
static int
@@ -1622,50 +1652,26 @@ rocker_world_port_obj_vlan_del(struct rocker_port *rocker_port,
}
static int
-rocker_world_port_obj_vlan_dump(const struct rocker_port *rocker_port,
- struct switchdev_obj_port_vlan *vlan,
- switchdev_obj_dump_cb_t *cb)
-{
- struct rocker_world_ops *wops = rocker_port->rocker->wops;
-
- if (!wops->port_obj_vlan_dump)
- return -EOPNOTSUPP;
- return wops->port_obj_vlan_dump(rocker_port, vlan, cb);
-}
-
-static int
-rocker_world_port_obj_fdb_add(struct rocker_port *rocker_port,
- const struct switchdev_obj_port_fdb *fdb,
- struct switchdev_trans *trans)
+rocker_world_port_fdb_add(struct rocker_port *rocker_port,
+ struct switchdev_notifier_fdb_info *info)
{
struct rocker_world_ops *wops = rocker_port->rocker->wops;
if (!wops->port_obj_fdb_add)
return -EOPNOTSUPP;
- return wops->port_obj_fdb_add(rocker_port, fdb, trans);
-}
-
-static int
-rocker_world_port_obj_fdb_del(struct rocker_port *rocker_port,
- const struct switchdev_obj_port_fdb *fdb)
-{
- struct rocker_world_ops *wops = rocker_port->rocker->wops;
- if (!wops->port_obj_fdb_del)
- return -EOPNOTSUPP;
- return wops->port_obj_fdb_del(rocker_port, fdb);
+ return wops->port_obj_fdb_add(rocker_port, info->vid, info->addr);
}
static int
-rocker_world_port_obj_fdb_dump(const struct rocker_port *rocker_port,
- struct switchdev_obj_port_fdb *fdb,
- switchdev_obj_dump_cb_t *cb)
+rocker_world_port_fdb_del(struct rocker_port *rocker_port,
+ struct switchdev_notifier_fdb_info *info)
{
struct rocker_world_ops *wops = rocker_port->rocker->wops;
- if (!wops->port_obj_fdb_dump)
+ if (!wops->port_obj_fdb_del)
return -EOPNOTSUPP;
- return wops->port_obj_fdb_dump(rocker_port, fdb, cb);
+ return wops->port_obj_fdb_del(rocker_port, info->vid, info->addr);
}
static int rocker_world_port_master_linked(struct rocker_port *rocker_port,
@@ -2022,12 +2028,6 @@ static const struct net_device_ops rocker_port_netdev_ops = {
.ndo_start_xmit = rocker_port_xmit,
.ndo_set_mac_address = rocker_port_set_mac_address,
.ndo_change_mtu = rocker_port_change_mtu,
- .ndo_bridge_getlink = switchdev_port_bridge_getlink,
- .ndo_bridge_setlink = switchdev_port_bridge_setlink,
- .ndo_bridge_dellink = switchdev_port_bridge_dellink,
- .ndo_fdb_add = switchdev_port_fdb_add,
- .ndo_fdb_del = switchdev_port_fdb_del,
- .ndo_fdb_dump = switchdev_port_fdb_dump,
.ndo_get_phys_port_name = rocker_port_get_phys_port_name,
.ndo_change_proto_down = rocker_port_change_proto_down,
.ndo_neigh_destroy = rocker_port_neigh_destroy,
@@ -2053,6 +2053,10 @@ static int rocker_port_attr_get(struct net_device *dev,
err = rocker_world_port_attr_bridge_flags_get(rocker_port,
&attr->u.brport_flags);
break;
+ case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS_SUPPORT:
+ err = rocker_world_port_attr_bridge_flags_support_get(rocker_port,
+ &attr->u.brport_flags_support);
+ break;
default:
return -EOPNOTSUPP;
}
@@ -2104,11 +2108,6 @@ static int rocker_port_obj_add(struct net_device *dev,
SWITCHDEV_OBJ_PORT_VLAN(obj),
trans);
break;
- case SWITCHDEV_OBJ_ID_PORT_FDB:
- err = rocker_world_port_obj_fdb_add(rocker_port,
- SWITCHDEV_OBJ_PORT_FDB(obj),
- trans);
- break;
default:
err = -EOPNOTSUPP;
break;
@@ -2128,36 +2127,6 @@ static int rocker_port_obj_del(struct net_device *dev,
err = rocker_world_port_obj_vlan_del(rocker_port,
SWITCHDEV_OBJ_PORT_VLAN(obj));
break;
- case SWITCHDEV_OBJ_ID_PORT_FDB:
- err = rocker_world_port_obj_fdb_del(rocker_port,
- SWITCHDEV_OBJ_PORT_FDB(obj));
- break;
- default:
- err = -EOPNOTSUPP;
- break;
- }
-
- return err;
-}
-
-static int rocker_port_obj_dump(struct net_device *dev,
- struct switchdev_obj *obj,
- switchdev_obj_dump_cb_t *cb)
-{
- const struct rocker_port *rocker_port = netdev_priv(dev);
- int err = 0;
-
- switch (obj->id) {
- case SWITCHDEV_OBJ_ID_PORT_FDB:
- err = rocker_world_port_obj_fdb_dump(rocker_port,
- SWITCHDEV_OBJ_PORT_FDB(obj),
- cb);
- break;
- case SWITCHDEV_OBJ_ID_PORT_VLAN:
- err = rocker_world_port_obj_vlan_dump(rocker_port,
- SWITCHDEV_OBJ_PORT_VLAN(obj),
- cb);
- break;
default:
err = -EOPNOTSUPP;
break;
@@ -2171,7 +2140,6 @@ static const struct switchdev_ops rocker_port_switchdev_ops = {
.switchdev_port_attr_set = rocker_port_attr_set,
.switchdev_port_obj_add = rocker_port_obj_add,
.switchdev_port_obj_del = rocker_port_obj_del,
- .switchdev_port_obj_dump = rocker_port_obj_dump,
};
struct rocker_fib_event_work {
@@ -2729,6 +2697,109 @@ static void rocker_msix_fini(const struct rocker *rocker)
kfree(rocker->msix_entries);
}
+static bool rocker_port_dev_check(const struct net_device *dev)
+{
+ return dev->netdev_ops == &rocker_port_netdev_ops;
+}
+
+struct rocker_switchdev_event_work {
+ struct work_struct work;
+ struct switchdev_notifier_fdb_info fdb_info;
+ struct rocker_port *rocker_port;
+ unsigned long event;
+};
+
+static void
+rocker_fdb_offload_notify(struct rocker_port *rocker_port,
+ struct switchdev_notifier_fdb_info *recv_info)
+{
+ struct switchdev_notifier_fdb_info info;
+
+ info.addr = recv_info->addr;
+ info.vid = recv_info->vid;
+ call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED,
+ rocker_port->dev, &info.info);
+}
+
+static void rocker_switchdev_event_work(struct work_struct *work)
+{
+ struct rocker_switchdev_event_work *switchdev_work =
+ container_of(work, struct rocker_switchdev_event_work, work);
+ struct rocker_port *rocker_port = switchdev_work->rocker_port;
+ struct switchdev_notifier_fdb_info *fdb_info;
+ int err;
+
+ rtnl_lock();
+ switch (switchdev_work->event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ fdb_info = &switchdev_work->fdb_info;
+ err = rocker_world_port_fdb_add(rocker_port, fdb_info);
+ if (err) {
+ netdev_dbg(rocker_port->dev, "fdb add failed err=%d\n", err);
+ break;
+ }
+ rocker_fdb_offload_notify(rocker_port, fdb_info);
+ break;
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ fdb_info = &switchdev_work->fdb_info;
+ err = rocker_world_port_fdb_del(rocker_port, fdb_info);
+ if (err)
+ netdev_dbg(rocker_port->dev, "fdb add failed err=%d\n", err);
+ break;
+ }
+ rtnl_unlock();
+
+ kfree(switchdev_work->fdb_info.addr);
+ kfree(switchdev_work);
+ dev_put(rocker_port->dev);
+}
+
+/* called under rcu_read_lock() */
+static int rocker_switchdev_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+ struct rocker_switchdev_event_work *switchdev_work;
+ struct switchdev_notifier_fdb_info *fdb_info = ptr;
+ struct rocker_port *rocker_port;
+
+ if (!rocker_port_dev_check(dev))
+ return NOTIFY_DONE;
+
+ rocker_port = netdev_priv(dev);
+ switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
+ if (WARN_ON(!switchdev_work))
+ return NOTIFY_BAD;
+
+ INIT_WORK(&switchdev_work->work, rocker_switchdev_event_work);
+ switchdev_work->rocker_port = rocker_port;
+ switchdev_work->event = event;
+
+ switch (event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE: /* fall through */
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ memcpy(&switchdev_work->fdb_info, ptr,
+ sizeof(switchdev_work->fdb_info));
+ switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
+ /* GFP_ATOMIC allocation can fail; bail out cleanly */
+ if (unlikely(!switchdev_work->fdb_info.addr)) {
+ kfree(switchdev_work);
+ return NOTIFY_BAD;
+ }
+ ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
+ fdb_info->addr);
+ /* Take a reference on the rocker device */
+ dev_hold(dev);
+ break;
+ default:
+ kfree(switchdev_work);
+ return NOTIFY_DONE;
+ }
+
+ queue_work(rocker_port->rocker->rocker_owq,
+ &switchdev_work->work);
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block rocker_switchdev_notifier = {
+ .notifier_call = rocker_switchdev_event,
+};
+
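
The handler/work-item split above exists because switchdev FDB notifiers fire in atomic context (under rcu_read_lock()), while programming the FDB needs the rtnl lock and may sleep. Roughly, one event's lifecycle:

	/* atomic notifier path:            deferred work path:
	 *   kzalloc(GFP_ATOMIC)              rtnl_lock()
	 *   copy fdb_info, dup addr          rocker_world_port_fdb_add()/_del()
	 *   dev_hold(dev)                    rtnl_unlock()
	 *   queue_work()                     kfree(addr); kfree(work); dev_put()
	 */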
static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct rocker *rocker;
@@ -2834,6 +2905,12 @@ static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (err)
goto err_register_fib_notifier;
+ err = register_switchdev_notifier(&rocker_switchdev_notifier);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to register switchdev notifier\n");
+ goto err_register_switchdev_notifier;
+ }
+
rocker->hw.id = rocker_read64(rocker, SWITCH_ID);
err = rocker_probe_ports(rocker);
@@ -2848,6 +2925,8 @@ static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return 0;
err_probe_ports:
+ unregister_switchdev_notifier(&rocker_switchdev_notifier);
+err_register_switchdev_notifier:
unregister_fib_notifier(&rocker->fib_nb);
err_register_fib_notifier:
destroy_workqueue(rocker->rocker_owq);
@@ -2878,6 +2957,7 @@ static void rocker_remove(struct pci_dev *pdev)
struct rocker *rocker = pci_get_drvdata(pdev);
rocker_remove_ports(rocker);
+ unregister_switchdev_notifier(&rocker_switchdev_notifier);
unregister_fib_notifier(&rocker->fib_nb);
rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET);
destroy_workqueue(rocker->rocker_owq);
@@ -2902,11 +2982,6 @@ static struct pci_driver rocker_pci_driver = {
* Net device notifier event handler
************************************/
-static bool rocker_port_dev_check(const struct net_device *dev)
-{
- return dev->netdev_ops == &rocker_port_netdev_ops;
-}
-
static bool rocker_port_dev_check_under(const struct net_device *dev,
struct rocker *rocker)
{
diff --git a/drivers/net/ethernet/rocker/rocker_ofdpa.c b/drivers/net/ethernet/rocker/rocker_ofdpa.c
index a9ce82d3e9cf..600e30e8f0be 100644
--- a/drivers/net/ethernet/rocker/rocker_ofdpa.c
+++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c
@@ -300,64 +300,6 @@ static bool ofdpa_flags_nowait(int flags)
return flags & OFDPA_OP_FLAG_NOWAIT;
}
-static void *__ofdpa_mem_alloc(struct switchdev_trans *trans, int flags,
- size_t size)
-{
- struct switchdev_trans_item *elem = NULL;
- gfp_t gfp_flags = (flags & OFDPA_OP_FLAG_NOWAIT) ?
- GFP_ATOMIC : GFP_KERNEL;
-
- /* If in transaction prepare phase, allocate the memory
- * and enqueue it on a transaction. If in transaction
- * commit phase, dequeue the memory from the transaction
- * rather than re-allocating the memory. The idea is the
- * driver code paths for prepare and commit are identical
- * so the memory allocated in the prepare phase is the
- * memory used in the commit phase.
- */
-
- if (!trans) {
- elem = kzalloc(size + sizeof(*elem), gfp_flags);
- } else if (switchdev_trans_ph_prepare(trans)) {
- elem = kzalloc(size + sizeof(*elem), gfp_flags);
- if (!elem)
- return NULL;
- switchdev_trans_item_enqueue(trans, elem, kfree, elem);
- } else {
- elem = switchdev_trans_item_dequeue(trans);
- }
-
- return elem ? elem + 1 : NULL;
-}
-
-static void *ofdpa_kzalloc(struct switchdev_trans *trans, int flags,
- size_t size)
-{
- return __ofdpa_mem_alloc(trans, flags, size);
-}
-
-static void *ofdpa_kcalloc(struct switchdev_trans *trans, int flags,
- size_t n, size_t size)
-{
- return __ofdpa_mem_alloc(trans, flags, n * size);
-}
-
-static void ofdpa_kfree(struct switchdev_trans *trans, const void *mem)
-{
- struct switchdev_trans_item *elem;
-
- /* Frees are ignored if in transaction prepare phase. The
- * memory remains on the per-port list until freed in the
- * commit phase.
- */
-
- if (switchdev_trans_ph_prepare(trans))
- return;
-
- elem = (struct switchdev_trans_item *) mem - 1;
- kfree(elem);
-}
-
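
For readers following the rest of the patch: the helpers deleted above implemented the transaction-aware memory contract sketched below, and every ofdpa_kzalloc()/ofdpa_kcalloc()/ofdpa_kfree() call site in the later hunks collapses to a plain kzalloc()/kcalloc()/kfree() with an explicit GFP flag.

	/* Old contract, per the removed comment:
	 *   prepare: elem = kzalloc(size + sizeof(*elem), gfp);
	 *            switchdev_trans_item_enqueue(trans, elem, kfree, elem);
	 *   commit:  elem = switchdev_trans_item_dequeue(trans);  (no allocation)
	 *   free:    no-op in prepare; real kfree() only in commit
	 */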
/*************************************************************
* Flow, group, FDB, internal VLAN and neigh command prepares
*************************************************************/
@@ -815,8 +757,7 @@ ofdpa_flow_tbl_find(const struct ofdpa *ofdpa,
}
static int ofdpa_flow_tbl_add(struct ofdpa_port *ofdpa_port,
- struct switchdev_trans *trans, int flags,
- struct ofdpa_flow_tbl_entry *match)
+ int flags, struct ofdpa_flow_tbl_entry *match)
{
struct ofdpa *ofdpa = ofdpa_port->ofdpa;
struct ofdpa_flow_tbl_entry *found;
@@ -831,9 +772,8 @@ static int ofdpa_flow_tbl_add(struct ofdpa_port *ofdpa_port,
if (found) {
match->cookie = found->cookie;
- if (!switchdev_trans_ph_prepare(trans))
- hash_del(&found->entry);
- ofdpa_kfree(trans, found);
+ hash_del(&found->entry);
+ kfree(found);
found = match;
found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD;
} else {
@@ -842,22 +782,18 @@ static int ofdpa_flow_tbl_add(struct ofdpa_port *ofdpa_port,
found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD;
}
- if (!switchdev_trans_ph_prepare(trans))
- hash_add(ofdpa->flow_tbl, &found->entry, found->key_crc32);
-
+ hash_add(ofdpa->flow_tbl, &found->entry, found->key_crc32);
spin_unlock_irqrestore(&ofdpa->flow_tbl_lock, lock_flags);
- if (!switchdev_trans_ph_prepare(trans))
- return rocker_cmd_exec(ofdpa_port->rocker_port,
- ofdpa_flags_nowait(flags),
- ofdpa_cmd_flow_tbl_add,
- found, NULL, NULL);
+ return rocker_cmd_exec(ofdpa_port->rocker_port,
+ ofdpa_flags_nowait(flags),
+ ofdpa_cmd_flow_tbl_add,
+ found, NULL, NULL);
- return 0;
}
static int ofdpa_flow_tbl_del(struct ofdpa_port *ofdpa_port,
- struct switchdev_trans *trans, int flags,
- struct ofdpa_flow_tbl_entry *match)
+ int flags, struct ofdpa_flow_tbl_entry *match)
{
struct ofdpa *ofdpa = ofdpa_port->ofdpa;
struct ofdpa_flow_tbl_entry *found;
@@ -872,45 +808,41 @@ static int ofdpa_flow_tbl_del(struct ofdpa_port *ofdpa_port,
found = ofdpa_flow_tbl_find(ofdpa, match);
if (found) {
- if (!switchdev_trans_ph_prepare(trans))
- hash_del(&found->entry);
+ hash_del(&found->entry);
found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL;
}
spin_unlock_irqrestore(&ofdpa->flow_tbl_lock, lock_flags);
- ofdpa_kfree(trans, match);
+ kfree(match);
if (found) {
- if (!switchdev_trans_ph_prepare(trans))
- err = rocker_cmd_exec(ofdpa_port->rocker_port,
- ofdpa_flags_nowait(flags),
- ofdpa_cmd_flow_tbl_del,
- found, NULL, NULL);
- ofdpa_kfree(trans, found);
+ err = rocker_cmd_exec(ofdpa_port->rocker_port,
+ ofdpa_flags_nowait(flags),
+ ofdpa_cmd_flow_tbl_del,
+ found, NULL, NULL);
+ kfree(found);
}
return err;
}
-static int ofdpa_flow_tbl_do(struct ofdpa_port *ofdpa_port,
- struct switchdev_trans *trans, int flags,
+static int ofdpa_flow_tbl_do(struct ofdpa_port *ofdpa_port, int flags,
struct ofdpa_flow_tbl_entry *entry)
{
if (flags & OFDPA_OP_FLAG_REMOVE)
- return ofdpa_flow_tbl_del(ofdpa_port, trans, flags, entry);
+ return ofdpa_flow_tbl_del(ofdpa_port, flags, entry);
else
- return ofdpa_flow_tbl_add(ofdpa_port, trans, flags, entry);
+ return ofdpa_flow_tbl_add(ofdpa_port, flags, entry);
}
-static int ofdpa_flow_tbl_ig_port(struct ofdpa_port *ofdpa_port,
- struct switchdev_trans *trans, int flags,
+static int ofdpa_flow_tbl_ig_port(struct ofdpa_port *ofdpa_port, int flags,
u32 in_pport, u32 in_pport_mask,
enum rocker_of_dpa_table_id goto_tbl)
{
struct ofdpa_flow_tbl_entry *entry;
- entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
return -ENOMEM;
@@ -920,11 +852,11 @@ static int ofdpa_flow_tbl_ig_port(struct ofdpa_port *ofdpa_port,
entry->key.ig_port.in_pport_mask = in_pport_mask;
entry->key.ig_port.goto_tbl = goto_tbl;
- return ofdpa_flow_tbl_do(ofdpa_port, trans, flags, entry);
+ return ofdpa_flow_tbl_do(ofdpa_port, flags, entry);
}
static int ofdpa_flow_tbl_vlan(struct ofdpa_port *ofdpa_port,
- struct switchdev_trans *trans, int flags,
+ int flags,
u32 in_pport, __be16 vlan_id,
__be16 vlan_id_mask,
enum rocker_of_dpa_table_id goto_tbl,
@@ -932,7 +864,7 @@ static int ofdpa_flow_tbl_vlan(struct ofdpa_port *ofdpa_port,
{
struct ofdpa_flow_tbl_entry *entry;
- entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
return -ENOMEM;
@@ -946,11 +878,10 @@ static int ofdpa_flow_tbl_vlan(struct ofdpa_port *ofdpa_port,
entry->key.vlan.untagged = untagged;
entry->key.vlan.new_vlan_id = new_vlan_id;
- return ofdpa_flow_tbl_do(ofdpa_port, trans, flags, entry);
+ return ofdpa_flow_tbl_do(ofdpa_port, flags, entry);
}
static int ofdpa_flow_tbl_term_mac(struct ofdpa_port *ofdpa_port,
- struct switchdev_trans *trans,
u32 in_pport, u32 in_pport_mask,
__be16 eth_type, const u8 *eth_dst,
const u8 *eth_dst_mask, __be16 vlan_id,
@@ -959,7 +890,7 @@ static int ofdpa_flow_tbl_term_mac(struct ofdpa_port *ofdpa_port,
{
struct ofdpa_flow_tbl_entry *entry;
- entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
return -ENOMEM;
@@ -983,13 +914,13 @@ static int ofdpa_flow_tbl_term_mac(struct ofdpa_port *ofdpa_port,
entry->key.term_mac.vlan_id_mask = vlan_id_mask;
entry->key.term_mac.copy_to_cpu = copy_to_cpu;
- return ofdpa_flow_tbl_do(ofdpa_port, trans, flags, entry);
+ return ofdpa_flow_tbl_do(ofdpa_port, flags, entry);
}
static int ofdpa_flow_tbl_bridge(struct ofdpa_port *ofdpa_port,
- struct switchdev_trans *trans, int flags,
- const u8 *eth_dst, const u8 *eth_dst_mask,
- __be16 vlan_id, u32 tunnel_id,
+ int flags, const u8 *eth_dst,
+ const u8 *eth_dst_mask, __be16 vlan_id,
+ u32 tunnel_id,
enum rocker_of_dpa_table_id goto_tbl,
u32 group_id, bool copy_to_cpu)
{
@@ -999,7 +930,7 @@ static int ofdpa_flow_tbl_bridge(struct ofdpa_port *ofdpa_port,
bool dflt = !eth_dst || (eth_dst && eth_dst_mask);
bool wild = false;
- entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
+ entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
if (!entry)
return -ENOMEM;
@@ -1037,11 +968,10 @@ static int ofdpa_flow_tbl_bridge(struct ofdpa_port *ofdpa_port,
entry->key.bridge.group_id = group_id;
entry->key.bridge.copy_to_cpu = copy_to_cpu;
- return ofdpa_flow_tbl_do(ofdpa_port, trans, flags, entry);
+ return ofdpa_flow_tbl_do(ofdpa_port, flags, entry);
}
static int ofdpa_flow_tbl_ucast4_routing(struct ofdpa_port *ofdpa_port,
- struct switchdev_trans *trans,
__be16 eth_type, __be32 dst,
__be32 dst_mask, u32 priority,
enum rocker_of_dpa_table_id goto_tbl,
@@ -1050,7 +980,7 @@ static int ofdpa_flow_tbl_ucast4_routing(struct ofdpa_port *ofdpa_port,
{
struct ofdpa_flow_tbl_entry *entry;
- entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
return -ENOMEM;
@@ -1065,11 +995,10 @@ static int ofdpa_flow_tbl_ucast4_routing(struct ofdpa_port *ofdpa_port,
ucast_routing.group_id);
entry->fi = fi;
- return ofdpa_flow_tbl_do(ofdpa_port, trans, flags, entry);
+ return ofdpa_flow_tbl_do(ofdpa_port, flags, entry);
}
-static int ofdpa_flow_tbl_acl(struct ofdpa_port *ofdpa_port,
- struct switchdev_trans *trans, int flags,
+static int ofdpa_flow_tbl_acl(struct ofdpa_port *ofdpa_port, int flags,
u32 in_pport, u32 in_pport_mask,
const u8 *eth_src, const u8 *eth_src_mask,
const u8 *eth_dst, const u8 *eth_dst_mask,
@@ -1081,7 +1010,7 @@ static int ofdpa_flow_tbl_acl(struct ofdpa_port *ofdpa_port,
u32 priority;
struct ofdpa_flow_tbl_entry *entry;
- entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
return -ENOMEM;
@@ -1116,7 +1045,7 @@ static int ofdpa_flow_tbl_acl(struct ofdpa_port *ofdpa_port,
entry->key.acl.ip_tos_mask = ip_tos_mask;
entry->key.acl.group_id = group_id;
- return ofdpa_flow_tbl_do(ofdpa_port, trans, flags, entry);
+ return ofdpa_flow_tbl_do(ofdpa_port, flags, entry);
}
static struct ofdpa_group_tbl_entry *
@@ -1134,22 +1063,20 @@ ofdpa_group_tbl_find(const struct ofdpa *ofdpa,
return NULL;
}
-static void ofdpa_group_tbl_entry_free(struct switchdev_trans *trans,
- struct ofdpa_group_tbl_entry *entry)
+static void ofdpa_group_tbl_entry_free(struct ofdpa_group_tbl_entry *entry)
{
switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
- ofdpa_kfree(trans, entry->group_ids);
+ kfree(entry->group_ids);
break;
default:
break;
}
- ofdpa_kfree(trans, entry);
+ kfree(entry);
}
-static int ofdpa_group_tbl_add(struct ofdpa_port *ofdpa_port,
- struct switchdev_trans *trans, int flags,
+static int ofdpa_group_tbl_add(struct ofdpa_port *ofdpa_port, int flags,
struct ofdpa_group_tbl_entry *match)
{
struct ofdpa *ofdpa = ofdpa_port->ofdpa;
@@ -1161,9 +1088,8 @@ static int ofdpa_group_tbl_add(struct ofdpa_port *ofdpa_port,
found = ofdpa_group_tbl_find(ofdpa, match);
if (found) {
- if (!switchdev_trans_ph_prepare(trans))
- hash_del(&found->entry);
- ofdpa_group_tbl_entry_free(trans, found);
+ hash_del(&found->entry);
+ ofdpa_group_tbl_entry_free(found);
found = match;
found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD;
} else {
@@ -1171,21 +1097,17 @@ static int ofdpa_group_tbl_add(struct ofdpa_port *ofdpa_port,
found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD;
}
- if (!switchdev_trans_ph_prepare(trans))
- hash_add(ofdpa->group_tbl, &found->entry, found->group_id);
+ hash_add(ofdpa->group_tbl, &found->entry, found->group_id);
spin_unlock_irqrestore(&ofdpa->group_tbl_lock, lock_flags);
- if (!switchdev_trans_ph_prepare(trans))
- return rocker_cmd_exec(ofdpa_port->rocker_port,
- ofdpa_flags_nowait(flags),
- ofdpa_cmd_group_tbl_add,
- found, NULL, NULL);
- return 0;
+ return rocker_cmd_exec(ofdpa_port->rocker_port,
+ ofdpa_flags_nowait(flags),
+ ofdpa_cmd_group_tbl_add,
+ found, NULL, NULL);
}
-static int ofdpa_group_tbl_del(struct ofdpa_port *ofdpa_port,
- struct switchdev_trans *trans, int flags,
+static int ofdpa_group_tbl_del(struct ofdpa_port *ofdpa_port, int flags,
struct ofdpa_group_tbl_entry *match)
{
struct ofdpa *ofdpa = ofdpa_port->ofdpa;
@@ -1198,97 +1120,90 @@ static int ofdpa_group_tbl_del(struct ofdpa_port *ofdpa_port,
found = ofdpa_group_tbl_find(ofdpa, match);
if (found) {
- if (!switchdev_trans_ph_prepare(trans))
- hash_del(&found->entry);
+ hash_del(&found->entry);
found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL;
}
spin_unlock_irqrestore(&ofdpa->group_tbl_lock, lock_flags);
- ofdpa_group_tbl_entry_free(trans, match);
+ ofdpa_group_tbl_entry_free(match);
if (found) {
- if (!switchdev_trans_ph_prepare(trans))
- err = rocker_cmd_exec(ofdpa_port->rocker_port,
- ofdpa_flags_nowait(flags),
- ofdpa_cmd_group_tbl_del,
- found, NULL, NULL);
- ofdpa_group_tbl_entry_free(trans, found);
+ err = rocker_cmd_exec(ofdpa_port->rocker_port,
+ ofdpa_flags_nowait(flags),
+ ofdpa_cmd_group_tbl_del,
+ found, NULL, NULL);
+ ofdpa_group_tbl_entry_free(found);
}
return err;
}
-static int ofdpa_group_tbl_do(struct ofdpa_port *ofdpa_port,
- struct switchdev_trans *trans, int flags,
+static int ofdpa_group_tbl_do(struct ofdpa_port *ofdpa_port, int flags,
struct ofdpa_group_tbl_entry *entry)
{
if (flags & OFDPA_OP_FLAG_REMOVE)
- return ofdpa_group_tbl_del(ofdpa_port, trans, flags, entry);
+ return ofdpa_group_tbl_del(ofdpa_port, flags, entry);
else
- return ofdpa_group_tbl_add(ofdpa_port, trans, flags, entry);
+ return ofdpa_group_tbl_add(ofdpa_port, flags, entry);
}
static int ofdpa_group_l2_interface(struct ofdpa_port *ofdpa_port,
- struct switchdev_trans *trans, int flags,
- __be16 vlan_id, u32 out_pport,
- int pop_vlan)
+ int flags, __be16 vlan_id,
+ u32 out_pport, int pop_vlan)
{
struct ofdpa_group_tbl_entry *entry;
- entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
return -ENOMEM;
entry->group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
entry->l2_interface.pop_vlan = pop_vlan;
- return ofdpa_group_tbl_do(ofdpa_port, trans, flags, entry);
+ return ofdpa_group_tbl_do(ofdpa_port, flags, entry);
}
static int ofdpa_group_l2_fan_out(struct ofdpa_port *ofdpa_port,
- struct switchdev_trans *trans,
int flags, u8 group_count,
const u32 *group_ids, u32 group_id)
{
struct ofdpa_group_tbl_entry *entry;
- entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
return -ENOMEM;
entry->group_id = group_id;
entry->group_count = group_count;
- entry->group_ids = ofdpa_kcalloc(trans, flags,
- group_count, sizeof(u32));
+ entry->group_ids = kcalloc(group_count, sizeof(u32), GFP_KERNEL);
if (!entry->group_ids) {
- ofdpa_kfree(trans, entry);
+ kfree(entry);
return -ENOMEM;
}
memcpy(entry->group_ids, group_ids, group_count * sizeof(u32));
- return ofdpa_group_tbl_do(ofdpa_port, trans, flags, entry);
+ return ofdpa_group_tbl_do(ofdpa_port, flags, entry);
}
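
Note kcalloc()'s argument order: element count first, element size second, GFP flags last; it zeroes the array and checks count * size for overflow. A minimal sketch of the allocation intended here:

	u32 *ids;

	ids = kcalloc(group_count, sizeof(u32), GFP_KERNEL);
	if (!ids)
		return -ENOMEM;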
static int ofdpa_group_l2_flood(struct ofdpa_port *ofdpa_port,
- struct switchdev_trans *trans, int flags,
- __be16 vlan_id, u8 group_count,
- const u32 *group_ids, u32 group_id)
+ int flags, __be16 vlan_id,
+ u8 group_count, const u32 *group_ids,
+ u32 group_id)
{
- return ofdpa_group_l2_fan_out(ofdpa_port, trans, flags,
+ return ofdpa_group_l2_fan_out(ofdpa_port, flags,
group_count, group_ids,
group_id);
}
-static int ofdpa_group_l3_unicast(struct ofdpa_port *ofdpa_port,
- struct switchdev_trans *trans, int flags,
+static int ofdpa_group_l3_unicast(struct ofdpa_port *ofdpa_port, int flags,
u32 index, const u8 *src_mac, const u8 *dst_mac,
__be16 vlan_id, bool ttl_check, u32 pport)
{
struct ofdpa_group_tbl_entry *entry;
- entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
return -ENOMEM;
@@ -1301,7 +1216,7 @@ static int ofdpa_group_l3_unicast(struct ofdpa_port *ofdpa_port,
entry->l3_unicast.ttl_check = ttl_check;
entry->l3_unicast.group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, pport);
- return ofdpa_group_tbl_do(ofdpa_port, trans, flags, entry);
+ return ofdpa_group_tbl_do(ofdpa_port, flags, entry);
}
static struct ofdpa_neigh_tbl_entry *
@@ -1318,43 +1233,34 @@ ofdpa_neigh_tbl_find(const struct ofdpa *ofdpa, __be32 ip_addr)
}
static void ofdpa_neigh_add(struct ofdpa *ofdpa,
- struct switchdev_trans *trans,
struct ofdpa_neigh_tbl_entry *entry)
{
- if (!switchdev_trans_ph_commit(trans))
- entry->index = ofdpa->neigh_tbl_next_index++;
- if (switchdev_trans_ph_prepare(trans))
- return;
+ entry->index = ofdpa->neigh_tbl_next_index++;
entry->ref_count++;
hash_add(ofdpa->neigh_tbl, &entry->entry,
be32_to_cpu(entry->ip_addr));
}
-static void ofdpa_neigh_del(struct switchdev_trans *trans,
- struct ofdpa_neigh_tbl_entry *entry)
+static void ofdpa_neigh_del(struct ofdpa_neigh_tbl_entry *entry)
{
- if (switchdev_trans_ph_prepare(trans))
- return;
if (--entry->ref_count == 0) {
hash_del(&entry->entry);
- ofdpa_kfree(trans, entry);
+ kfree(entry);
}
}
static void ofdpa_neigh_update(struct ofdpa_neigh_tbl_entry *entry,
- struct switchdev_trans *trans,
const u8 *eth_dst, bool ttl_check)
{
if (eth_dst) {
ether_addr_copy(entry->eth_dst, eth_dst);
entry->ttl_check = ttl_check;
- } else if (!switchdev_trans_ph_prepare(trans)) {
+ } else {
entry->ref_count++;
}
}
static int ofdpa_port_ipv4_neigh(struct ofdpa_port *ofdpa_port,
- struct switchdev_trans *trans,
int flags, __be32 ip_addr, const u8 *eth_dst)
{
struct ofdpa *ofdpa = ofdpa_port->ofdpa;
@@ -1371,7 +1277,7 @@ static int ofdpa_port_ipv4_neigh(struct ofdpa_port *ofdpa_port,
bool removing;
int err = 0;
- entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
return -ENOMEM;
@@ -1388,12 +1294,12 @@ static int ofdpa_port_ipv4_neigh(struct ofdpa_port *ofdpa_port,
entry->dev = ofdpa_port->dev;
ether_addr_copy(entry->eth_dst, eth_dst);
entry->ttl_check = true;
- ofdpa_neigh_add(ofdpa, trans, entry);
+ ofdpa_neigh_add(ofdpa, entry);
} else if (removing) {
memcpy(entry, found, sizeof(*entry));
- ofdpa_neigh_del(trans, found);
+ ofdpa_neigh_del(found);
} else if (updating) {
- ofdpa_neigh_update(found, trans, eth_dst, true);
+ ofdpa_neigh_update(found, eth_dst, true);
memcpy(entry, found, sizeof(*entry));
} else {
err = -ENOENT;
@@ -1410,7 +1316,7 @@ static int ofdpa_port_ipv4_neigh(struct ofdpa_port *ofdpa_port,
* other routes' nexthops.
*/
- err = ofdpa_group_l3_unicast(ofdpa_port, trans, flags,
+ err = ofdpa_group_l3_unicast(ofdpa_port, flags,
entry->index,
ofdpa_port->dev->dev_addr,
entry->eth_dst,
@@ -1425,7 +1331,7 @@ static int ofdpa_port_ipv4_neigh(struct ofdpa_port *ofdpa_port,
if (adding || removing) {
group_id = ROCKER_GROUP_L3_UNICAST(entry->index);
- err = ofdpa_flow_tbl_ucast4_routing(ofdpa_port, trans,
+ err = ofdpa_flow_tbl_ucast4_routing(ofdpa_port,
eth_type, ip_addr,
inet_make_mask(32),
priority, goto_tbl,
@@ -1438,13 +1344,12 @@ static int ofdpa_port_ipv4_neigh(struct ofdpa_port *ofdpa_port,
err_out:
if (!adding)
- ofdpa_kfree(trans, entry);
+ kfree(entry);
return err;
}
static int ofdpa_port_ipv4_resolve(struct ofdpa_port *ofdpa_port,
- struct switchdev_trans *trans,
__be32 ip_addr)
{
struct net_device *dev = ofdpa_port->dev;
@@ -1463,7 +1368,7 @@ static int ofdpa_port_ipv4_resolve(struct ofdpa_port *ofdpa_port,
*/
if (n->nud_state & NUD_VALID)
- err = ofdpa_port_ipv4_neigh(ofdpa_port, trans, 0,
+ err = ofdpa_port_ipv4_neigh(ofdpa_port, 0,
ip_addr, n->ha);
else
neigh_event_send(n, NULL);
@@ -1473,8 +1378,7 @@ static int ofdpa_port_ipv4_resolve(struct ofdpa_port *ofdpa_port,
}
static int ofdpa_port_ipv4_nh(struct ofdpa_port *ofdpa_port,
- struct switchdev_trans *trans, int flags,
- __be32 ip_addr, u32 *index)
+ int flags, __be32 ip_addr, u32 *index)
{
struct ofdpa *ofdpa = ofdpa_port->ofdpa;
struct ofdpa_neigh_tbl_entry *entry;
@@ -1486,7 +1390,7 @@ static int ofdpa_port_ipv4_nh(struct ofdpa_port *ofdpa_port,
bool resolved = true;
int err = 0;
- entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
return -ENOMEM;
@@ -1501,14 +1405,14 @@ static int ofdpa_port_ipv4_nh(struct ofdpa_port *ofdpa_port,
if (adding) {
entry->ip_addr = ip_addr;
entry->dev = ofdpa_port->dev;
- ofdpa_neigh_add(ofdpa, trans, entry);
+ ofdpa_neigh_add(ofdpa, entry);
*index = entry->index;
resolved = false;
} else if (removing) {
*index = found->index;
- ofdpa_neigh_del(trans, found);
+ ofdpa_neigh_del(found);
} else if (updating) {
- ofdpa_neigh_update(found, trans, NULL, false);
+ ofdpa_neigh_update(found, NULL, false);
resolved = !is_zero_ether_addr(found->eth_dst);
*index = found->index;
} else {
@@ -1518,7 +1422,7 @@ static int ofdpa_port_ipv4_nh(struct ofdpa_port *ofdpa_port,
spin_unlock_irqrestore(&ofdpa->neigh_tbl_lock, lock_flags);
if (!adding)
- ofdpa_kfree(trans, entry);
+ kfree(entry);
if (err)
return err;
@@ -1526,7 +1430,7 @@ static int ofdpa_port_ipv4_nh(struct ofdpa_port *ofdpa_port,
/* Resolved means neigh ip_addr is resolved to neigh mac. */
if (!resolved)
- err = ofdpa_port_ipv4_resolve(ofdpa_port, trans, ip_addr);
+ err = ofdpa_port_ipv4_resolve(ofdpa_port, ip_addr);
return err;
}
@@ -1541,7 +1445,6 @@ static struct ofdpa_port *ofdpa_port_get(const struct ofdpa *ofdpa,
}
static int ofdpa_port_vlan_flood_group(struct ofdpa_port *ofdpa_port,
- struct switchdev_trans *trans,
int flags, __be16 vlan_id)
{
struct ofdpa_port *p;
@@ -1553,7 +1456,7 @@ static int ofdpa_port_vlan_flood_group(struct ofdpa_port *ofdpa_port,
int err = 0;
int i;
- group_ids = ofdpa_kcalloc(trans, flags, port_count, sizeof(u32));
+ group_ids = kcalloc(port_count, sizeof(u32), GFP_KERNEL);
if (!group_ids)
return -ENOMEM;
@@ -1578,18 +1481,17 @@ static int ofdpa_port_vlan_flood_group(struct ofdpa_port *ofdpa_port,
if (group_count == 0)
goto no_ports_in_vlan;
- err = ofdpa_group_l2_flood(ofdpa_port, trans, flags, vlan_id,
+ err = ofdpa_group_l2_flood(ofdpa_port, flags, vlan_id,
group_count, group_ids, group_id);
if (err)
netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 flood group\n", err);
no_ports_in_vlan:
- ofdpa_kfree(trans, group_ids);
+ kfree(group_ids);
return err;
}
-static int ofdpa_port_vlan_l2_groups(struct ofdpa_port *ofdpa_port,
- struct switchdev_trans *trans, int flags,
+static int ofdpa_port_vlan_l2_groups(struct ofdpa_port *ofdpa_port, int flags,
__be16 vlan_id, bool pop_vlan)
{
const struct ofdpa *ofdpa = ofdpa_port->ofdpa;
@@ -1608,7 +1510,7 @@ static int ofdpa_port_vlan_l2_groups(struct ofdpa_port *ofdpa_port,
if (ofdpa_port->stp_state == BR_STATE_LEARNING ||
ofdpa_port->stp_state == BR_STATE_FORWARDING) {
out_pport = ofdpa_port->pport;
- err = ofdpa_group_l2_interface(ofdpa_port, trans, flags,
+ err = ofdpa_group_l2_interface(ofdpa_port, flags,
vlan_id, out_pport, pop_vlan);
if (err) {
netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 group for pport %d\n",
@@ -1632,7 +1534,7 @@ static int ofdpa_port_vlan_l2_groups(struct ofdpa_port *ofdpa_port,
return 0;
out_pport = 0;
- err = ofdpa_group_l2_interface(ofdpa_port, trans, flags,
+ err = ofdpa_group_l2_interface(ofdpa_port, flags,
vlan_id, out_pport, pop_vlan);
if (err) {
netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 group for CPU port\n", err);
@@ -1693,8 +1595,7 @@ static struct ofdpa_ctrl {
},
};
-static int ofdpa_port_ctrl_vlan_acl(struct ofdpa_port *ofdpa_port,
- struct switchdev_trans *trans, int flags,
+static int ofdpa_port_ctrl_vlan_acl(struct ofdpa_port *ofdpa_port, int flags,
const struct ofdpa_ctrl *ctrl, __be16 vlan_id)
{
u32 in_pport = ofdpa_port->pport;
@@ -1710,7 +1611,7 @@ static int ofdpa_port_ctrl_vlan_acl(struct ofdpa_port *ofdpa_port,
u32 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
int err;
- err = ofdpa_flow_tbl_acl(ofdpa_port, trans, flags,
+ err = ofdpa_flow_tbl_acl(ofdpa_port, flags,
in_pport, in_pport_mask,
eth_src, eth_src_mask,
ctrl->eth_dst, ctrl->eth_dst_mask,
@@ -1727,9 +1628,7 @@ static int ofdpa_port_ctrl_vlan_acl(struct ofdpa_port *ofdpa_port,
}
static int ofdpa_port_ctrl_vlan_bridge(struct ofdpa_port *ofdpa_port,
- struct switchdev_trans *trans,
- int flags,
- const struct ofdpa_ctrl *ctrl,
+ int flags, const struct ofdpa_ctrl *ctrl,
__be16 vlan_id)
{
enum rocker_of_dpa_table_id goto_tbl =
@@ -1741,7 +1640,7 @@ static int ofdpa_port_ctrl_vlan_bridge(struct ofdpa_port *ofdpa_port,
if (!ofdpa_port_is_bridged(ofdpa_port))
return 0;
- err = ofdpa_flow_tbl_bridge(ofdpa_port, trans, flags,
+ err = ofdpa_flow_tbl_bridge(ofdpa_port, flags,
ctrl->eth_dst, ctrl->eth_dst_mask,
vlan_id, tunnel_id,
goto_tbl, group_id, ctrl->copy_to_cpu);
@@ -1752,8 +1651,7 @@ static int ofdpa_port_ctrl_vlan_bridge(struct ofdpa_port *ofdpa_port,
return err;
}
-static int ofdpa_port_ctrl_vlan_term(struct ofdpa_port *ofdpa_port,
- struct switchdev_trans *trans, int flags,
+static int ofdpa_port_ctrl_vlan_term(struct ofdpa_port *ofdpa_port, int flags,
const struct ofdpa_ctrl *ctrl, __be16 vlan_id)
{
u32 in_pport_mask = 0xffffffff;
@@ -1763,8 +1661,7 @@ static int ofdpa_port_ctrl_vlan_term(struct ofdpa_port *ofdpa_port,
if (ntohs(vlan_id) == 0)
vlan_id = ofdpa_port->internal_vlan_id;
- err = ofdpa_flow_tbl_term_mac(ofdpa_port, trans,
- ofdpa_port->pport, in_pport_mask,
+ err = ofdpa_flow_tbl_term_mac(ofdpa_port, ofdpa_port->pport, in_pport_mask,
ctrl->eth_type, ctrl->eth_dst,
ctrl->eth_dst_mask, vlan_id,
vlan_id_mask, ctrl->copy_to_cpu,
@@ -1776,26 +1673,24 @@ static int ofdpa_port_ctrl_vlan_term(struct ofdpa_port *ofdpa_port,
return err;
}
-static int ofdpa_port_ctrl_vlan(struct ofdpa_port *ofdpa_port,
- struct switchdev_trans *trans, int flags,
+static int ofdpa_port_ctrl_vlan(struct ofdpa_port *ofdpa_port, int flags,
const struct ofdpa_ctrl *ctrl, __be16 vlan_id)
{
if (ctrl->acl)
- return ofdpa_port_ctrl_vlan_acl(ofdpa_port, trans, flags,
+ return ofdpa_port_ctrl_vlan_acl(ofdpa_port, flags,
ctrl, vlan_id);
if (ctrl->bridge)
- return ofdpa_port_ctrl_vlan_bridge(ofdpa_port, trans, flags,
+ return ofdpa_port_ctrl_vlan_bridge(ofdpa_port, flags,
ctrl, vlan_id);
if (ctrl->term)
- return ofdpa_port_ctrl_vlan_term(ofdpa_port, trans, flags,
+ return ofdpa_port_ctrl_vlan_term(ofdpa_port, flags,
ctrl, vlan_id);
return -EOPNOTSUPP;
}
-static int ofdpa_port_ctrl_vlan_add(struct ofdpa_port *ofdpa_port,
- struct switchdev_trans *trans, int flags,
+static int ofdpa_port_ctrl_vlan_add(struct ofdpa_port *ofdpa_port, int flags,
__be16 vlan_id)
{
int err = 0;
@@ -1803,7 +1698,7 @@ static int ofdpa_port_ctrl_vlan_add(struct ofdpa_port *ofdpa_port,
for (i = 0; i < OFDPA_CTRL_MAX; i++) {
if (ofdpa_port->ctrls[i]) {
- err = ofdpa_port_ctrl_vlan(ofdpa_port, trans, flags,
+ err = ofdpa_port_ctrl_vlan(ofdpa_port, flags,
&ofdpa_ctrls[i], vlan_id);
if (err)
return err;
@@ -1813,8 +1708,7 @@ static int ofdpa_port_ctrl_vlan_add(struct ofdpa_port *ofdpa_port,
return err;
}
-static int ofdpa_port_ctrl(struct ofdpa_port *ofdpa_port,
- struct switchdev_trans *trans, int flags,
+static int ofdpa_port_ctrl(struct ofdpa_port *ofdpa_port, int flags,
const struct ofdpa_ctrl *ctrl)
{
u16 vid;
@@ -1823,7 +1717,7 @@ static int ofdpa_port_ctrl(struct ofdpa_port *ofdpa_port,
for (vid = 1; vid < VLAN_N_VID; vid++) {
if (!test_bit(vid, ofdpa_port->vlan_bitmap))
continue;
- err = ofdpa_port_ctrl_vlan(ofdpa_port, trans, flags,
+ err = ofdpa_port_ctrl_vlan(ofdpa_port, flags,
ctrl, htons(vid));
if (err)
break;
@@ -1832,8 +1726,8 @@ static int ofdpa_port_ctrl(struct ofdpa_port *ofdpa_port,
return err;
}
-static int ofdpa_port_vlan(struct ofdpa_port *ofdpa_port,
- struct switchdev_trans *trans, int flags, u16 vid)
+static int ofdpa_port_vlan(struct ofdpa_port *ofdpa_port, int flags,
+ u16 vid)
{
enum rocker_of_dpa_table_id goto_tbl =
ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
@@ -1857,43 +1751,44 @@ static int ofdpa_port_vlan(struct ofdpa_port *ofdpa_port,
change_bit(ntohs(internal_vlan_id), ofdpa_port->vlan_bitmap);
if (adding) {
- err = ofdpa_port_ctrl_vlan_add(ofdpa_port, trans, flags,
+ err = ofdpa_port_ctrl_vlan_add(ofdpa_port, flags,
internal_vlan_id);
if (err) {
netdev_err(ofdpa_port->dev, "Error (%d) port ctrl vlan add\n", err);
- goto err_out;
+ goto err_vlan_add;
}
}
- err = ofdpa_port_vlan_l2_groups(ofdpa_port, trans, flags,
+ err = ofdpa_port_vlan_l2_groups(ofdpa_port, flags,
internal_vlan_id, untagged);
if (err) {
netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 groups\n", err);
- goto err_out;
+ goto err_vlan_l2_groups;
}
- err = ofdpa_port_vlan_flood_group(ofdpa_port, trans, flags,
+ err = ofdpa_port_vlan_flood_group(ofdpa_port, flags,
internal_vlan_id);
if (err) {
netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 flood group\n", err);
- goto err_out;
+ goto err_flood_group;
}
- err = ofdpa_flow_tbl_vlan(ofdpa_port, trans, flags,
+ err = ofdpa_flow_tbl_vlan(ofdpa_port, flags,
in_pport, vlan_id, vlan_id_mask,
goto_tbl, untagged, internal_vlan_id);
if (err)
netdev_err(ofdpa_port->dev, "Error (%d) port VLAN table\n", err);
-err_out:
- if (switchdev_trans_ph_prepare(trans))
- change_bit(ntohs(internal_vlan_id), ofdpa_port->vlan_bitmap);
+ return err;
+err_vlan_add:
+err_vlan_l2_groups:
+err_flood_group:
+ change_bit(ntohs(internal_vlan_id), ofdpa_port->vlan_bitmap);
return err;
}
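
The restructured exit path above follows the usual kernel goto-unwind convention: one label per failing step, stacked so each failure jumps past the undo work it never reached; here all three labels share a single rollback (flipping the vlan_bitmap bit back). A generic sketch with hypothetical step/undo names:

	err = step_a();
	if (err)
		goto err_a;
	err = step_b();
	if (err)
		goto err_b;
	return 0;

err_b:
	undo_a();		/* only step_a needs unwinding */
err_a:
	undo_shared_state();	/* rollback common to all failures */
	return err;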
-static int ofdpa_port_ig_tbl(struct ofdpa_port *ofdpa_port,
- struct switchdev_trans *trans, int flags)
+static int ofdpa_port_ig_tbl(struct ofdpa_port *ofdpa_port, int flags)
{
enum rocker_of_dpa_table_id goto_tbl;
u32 in_pport;
@@ -1908,7 +1803,7 @@ static int ofdpa_port_ig_tbl(struct ofdpa_port *ofdpa_port,
in_pport_mask = 0xffff0000;
goto_tbl = ROCKER_OF_DPA_TABLE_ID_VLAN;
- err = ofdpa_flow_tbl_ig_port(ofdpa_port, trans, flags,
+ err = ofdpa_flow_tbl_ig_port(ofdpa_port, flags,
in_pport, in_pport_mask,
goto_tbl);
if (err)
@@ -1920,7 +1815,6 @@ static int ofdpa_port_ig_tbl(struct ofdpa_port *ofdpa_port,
struct ofdpa_fdb_learn_work {
struct work_struct work;
struct ofdpa_port *ofdpa_port;
- struct switchdev_trans *trans;
int flags;
u8 addr[ETH_ALEN];
u16 vid;
@@ -1939,19 +1833,18 @@ static void ofdpa_port_fdb_learn_work(struct work_struct *work)
rtnl_lock();
if (learned && removing)
- call_switchdev_notifiers(SWITCHDEV_FDB_DEL,
+ call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE,
lw->ofdpa_port->dev, &info.info);
else if (learned && !removing)
- call_switchdev_notifiers(SWITCHDEV_FDB_ADD,
+ call_switchdev_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE,
lw->ofdpa_port->dev, &info.info);
rtnl_unlock();
- ofdpa_kfree(lw->trans, work);
+ kfree(work);
}
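
The rename from SWITCHDEV_FDB_ADD/DEL to the _TO_BRIDGE variants makes the notifier's direction explicit. The directions used across this patch, as defined in include/net/switchdev.h of this era:

	/* driver -> bridge: hardware learned or aged out an address
	 *   SWITCHDEV_FDB_ADD_TO_BRIDGE, SWITCHDEV_FDB_DEL_TO_BRIDGE
	 * bridge -> driver: a software FDB entry should be (un)offloaded
	 *   SWITCHDEV_FDB_ADD_TO_DEVICE, SWITCHDEV_FDB_DEL_TO_DEVICE
	 * driver -> bridge: confirmation that an entry was programmed
	 *   SWITCHDEV_FDB_OFFLOADED
	 */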
static int ofdpa_port_fdb_learn(struct ofdpa_port *ofdpa_port,
- struct switchdev_trans *trans, int flags,
- const u8 *addr, __be16 vlan_id)
+ int flags, const u8 *addr, __be16 vlan_id)
{
struct ofdpa_fdb_learn_work *lw;
enum rocker_of_dpa_table_id goto_tbl =
@@ -1959,7 +1852,6 @@ static int ofdpa_port_fdb_learn(struct ofdpa_port *ofdpa_port,
u32 out_pport = ofdpa_port->pport;
u32 tunnel_id = 0;
u32 group_id = ROCKER_GROUP_NONE;
- bool syncing = !!(ofdpa_port->brport_flags & BR_LEARNING_SYNC);
bool copy_to_cpu = false;
int err;
@@ -1967,36 +1859,28 @@ static int ofdpa_port_fdb_learn(struct ofdpa_port *ofdpa_port,
group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
if (!(flags & OFDPA_OP_FLAG_REFRESH)) {
- err = ofdpa_flow_tbl_bridge(ofdpa_port, trans, flags, addr,
+ err = ofdpa_flow_tbl_bridge(ofdpa_port, flags, addr,
NULL, vlan_id, tunnel_id, goto_tbl,
group_id, copy_to_cpu);
if (err)
return err;
}
- if (!syncing)
- return 0;
-
if (!ofdpa_port_is_bridged(ofdpa_port))
return 0;
- lw = ofdpa_kzalloc(trans, flags, sizeof(*lw));
+ lw = kzalloc(sizeof(*lw), GFP_ATOMIC);
if (!lw)
return -ENOMEM;
INIT_WORK(&lw->work, ofdpa_port_fdb_learn_work);
lw->ofdpa_port = ofdpa_port;
- lw->trans = trans;
lw->flags = flags;
ether_addr_copy(lw->addr, addr);
lw->vid = ofdpa_port_vlan_to_vid(ofdpa_port, vlan_id);
- if (switchdev_trans_ph_prepare(trans))
- ofdpa_kfree(trans, lw);
- else
- schedule_work(&lw->work);
-
+ schedule_work(&lw->work);
return 0;
}
@@ -2014,7 +1898,6 @@ ofdpa_fdb_tbl_find(const struct ofdpa *ofdpa,
}
static int ofdpa_port_fdb(struct ofdpa_port *ofdpa_port,
- struct switchdev_trans *trans,
const unsigned char *addr,
__be16 vlan_id, int flags)
{
@@ -2024,7 +1907,7 @@ static int ofdpa_port_fdb(struct ofdpa_port *ofdpa_port,
bool removing = (flags & OFDPA_OP_FLAG_REMOVE);
unsigned long lock_flags;
- fdb = ofdpa_kzalloc(trans, flags, sizeof(*fdb));
+ fdb = kzalloc(sizeof(*fdb), GFP_KERNEL);
if (!fdb)
return -ENOMEM;
@@ -2042,32 +1925,29 @@ static int ofdpa_port_fdb(struct ofdpa_port *ofdpa_port,
if (found) {
found->touched = jiffies;
if (removing) {
- ofdpa_kfree(trans, fdb);
- if (!switchdev_trans_ph_prepare(trans))
- hash_del(&found->entry);
+ kfree(fdb);
+ hash_del(&found->entry);
}
} else if (!removing) {
- if (!switchdev_trans_ph_prepare(trans))
- hash_add(ofdpa->fdb_tbl, &fdb->entry,
- fdb->key_crc32);
+ hash_add(ofdpa->fdb_tbl, &fdb->entry,
+ fdb->key_crc32);
}
spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, lock_flags);
/* Check if adding and already exists, or removing and can't find */
if (!found != !removing) {
- ofdpa_kfree(trans, fdb);
+ kfree(fdb);
if (!found && removing)
return 0;
/* Refreshing existing to update aging timers */
flags |= OFDPA_OP_FLAG_REFRESH;
}
- return ofdpa_port_fdb_learn(ofdpa_port, trans, flags, addr, vlan_id);
+ return ofdpa_port_fdb_learn(ofdpa_port, flags, addr, vlan_id);
}
-static int ofdpa_port_fdb_flush(struct ofdpa_port *ofdpa_port,
- struct switchdev_trans *trans, int flags)
+static int ofdpa_port_fdb_flush(struct ofdpa_port *ofdpa_port, int flags)
{
struct ofdpa *ofdpa = ofdpa_port->ofdpa;
struct ofdpa_fdb_tbl_entry *found;
@@ -2089,13 +1969,12 @@ static int ofdpa_port_fdb_flush(struct ofdpa_port *ofdpa_port,
continue;
if (!found->learned)
continue;
- err = ofdpa_port_fdb_learn(ofdpa_port, trans, flags,
+ err = ofdpa_port_fdb_learn(ofdpa_port, flags,
found->key.addr,
found->key.vlan_id);
if (err)
goto err_out;
- if (!switchdev_trans_ph_prepare(trans))
- hash_del(&found->entry);
+ hash_del(&found->entry);
}
err_out:
@@ -2125,8 +2004,8 @@ static void ofdpa_fdb_cleanup(unsigned long data)
ofdpa_port = entry->key.ofdpa_port;
expires = entry->touched + ofdpa_port->ageing_time;
if (time_before_eq(expires, jiffies)) {
- ofdpa_port_fdb_learn(ofdpa_port, NULL,
- flags, entry->key.addr,
+ ofdpa_port_fdb_learn(ofdpa_port, flags,
+ entry->key.addr,
entry->key.vlan_id);
hash_del(&entry->entry);
} else if (time_before(expires, next_timer)) {
@@ -2140,8 +2019,7 @@ static void ofdpa_fdb_cleanup(unsigned long data)
}
static int ofdpa_port_router_mac(struct ofdpa_port *ofdpa_port,
- struct switchdev_trans *trans, int flags,
- __be16 vlan_id)
+ int flags, __be16 vlan_id)
{
u32 in_pport_mask = 0xffffffff;
__be16 eth_type;
@@ -2154,26 +2032,25 @@ static int ofdpa_port_router_mac(struct ofdpa_port *ofdpa_port,
vlan_id = ofdpa_port->internal_vlan_id;
eth_type = htons(ETH_P_IP);
- err = ofdpa_flow_tbl_term_mac(ofdpa_port, trans,
- ofdpa_port->pport, in_pport_mask,
- eth_type, ofdpa_port->dev->dev_addr,
+ err = ofdpa_flow_tbl_term_mac(ofdpa_port, ofdpa_port->pport,
+ in_pport_mask, eth_type,
+ ofdpa_port->dev->dev_addr,
dst_mac_mask, vlan_id, vlan_id_mask,
copy_to_cpu, flags);
if (err)
return err;
eth_type = htons(ETH_P_IPV6);
- err = ofdpa_flow_tbl_term_mac(ofdpa_port, trans,
- ofdpa_port->pport, in_pport_mask,
- eth_type, ofdpa_port->dev->dev_addr,
+ err = ofdpa_flow_tbl_term_mac(ofdpa_port, ofdpa_port->pport,
+ in_pport_mask, eth_type,
+ ofdpa_port->dev->dev_addr,
dst_mac_mask, vlan_id, vlan_id_mask,
copy_to_cpu, flags);
return err;
}
-static int ofdpa_port_fwding(struct ofdpa_port *ofdpa_port,
- struct switchdev_trans *trans, int flags)
+static int ofdpa_port_fwding(struct ofdpa_port *ofdpa_port, int flags)
{
bool pop_vlan;
u32 out_pport;
@@ -2198,7 +2075,7 @@ static int ofdpa_port_fwding(struct ofdpa_port *ofdpa_port,
continue;
vlan_id = htons(vid);
pop_vlan = ofdpa_vlan_id_is_internal(vlan_id);
- err = ofdpa_group_l2_interface(ofdpa_port, trans, flags,
+ err = ofdpa_group_l2_interface(ofdpa_port, flags,
vlan_id, out_pport, pop_vlan);
if (err) {
netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 group for pport %d\n",
@@ -2211,7 +2088,6 @@ static int ofdpa_port_fwding(struct ofdpa_port *ofdpa_port,
}
static int ofdpa_port_stp_update(struct ofdpa_port *ofdpa_port,
- struct switchdev_trans *trans,
int flags, u8 state)
{
bool want[OFDPA_CTRL_MAX] = { 0, };
@@ -2220,11 +2096,12 @@ static int ofdpa_port_stp_update(struct ofdpa_port *ofdpa_port,
int err;
int i;
+ memcpy(prev_ctrls, ofdpa_port->ctrls, sizeof(prev_ctrls));
prev_state = ofdpa_port->stp_state;
- if (prev_state == state)
+
+ if (ofdpa_port->stp_state == state)
return 0;
- memcpy(prev_ctrls, ofdpa_port->ctrls, sizeof(prev_ctrls));
ofdpa_port->stp_state = state;
switch (state) {
@@ -2254,26 +2131,29 @@ static int ofdpa_port_stp_update(struct ofdpa_port *ofdpa_port,
if (want[i] != ofdpa_port->ctrls[i]) {
int ctrl_flags = flags |
(want[i] ? 0 : OFDPA_OP_FLAG_REMOVE);
- err = ofdpa_port_ctrl(ofdpa_port, trans, ctrl_flags,
+ err = ofdpa_port_ctrl(ofdpa_port, ctrl_flags,
&ofdpa_ctrls[i]);
if (err)
- goto err_out;
+ goto err_port_ctrl;
ofdpa_port->ctrls[i] = want[i];
}
}
- err = ofdpa_port_fdb_flush(ofdpa_port, trans, flags);
+ err = ofdpa_port_fdb_flush(ofdpa_port, flags);
if (err)
- goto err_out;
+ goto err_fdb_flush;
- err = ofdpa_port_fwding(ofdpa_port, trans, flags);
+ err = ofdpa_port_fwding(ofdpa_port, flags);
+ if (err)
+ goto err_port_fwding;
-err_out:
- if (switchdev_trans_ph_prepare(trans)) {
- memcpy(ofdpa_port->ctrls, prev_ctrls, sizeof(prev_ctrls));
- ofdpa_port->stp_state = prev_state;
- }
+ return 0;
+err_port_ctrl:
+err_fdb_flush:
+err_port_fwding:
+ memcpy(ofdpa_port->ctrls, prev_ctrls, sizeof(prev_ctrls));
+ ofdpa_port->stp_state = prev_state;
return err;
}
@@ -2284,7 +2164,7 @@ static int ofdpa_port_fwd_enable(struct ofdpa_port *ofdpa_port, int flags)
return 0;
/* port is not bridged, so simulate going to FORWARDING state */
- return ofdpa_port_stp_update(ofdpa_port, NULL, flags,
+ return ofdpa_port_stp_update(ofdpa_port, flags,
BR_STATE_FORWARDING);
}
@@ -2295,25 +2175,24 @@ static int ofdpa_port_fwd_disable(struct ofdpa_port *ofdpa_port, int flags)
return 0;
/* port is not bridged, so simulate going to DISABLED state */
- return ofdpa_port_stp_update(ofdpa_port, NULL, flags,
+ return ofdpa_port_stp_update(ofdpa_port, flags,
BR_STATE_DISABLED);
}
static int ofdpa_port_vlan_add(struct ofdpa_port *ofdpa_port,
- struct switchdev_trans *trans,
u16 vid, u16 flags)
{
int err;
/* XXX deal with flags for PVID and untagged */
- err = ofdpa_port_vlan(ofdpa_port, trans, 0, vid);
+ err = ofdpa_port_vlan(ofdpa_port, 0, vid);
if (err)
return err;
- err = ofdpa_port_router_mac(ofdpa_port, trans, 0, htons(vid));
+ err = ofdpa_port_router_mac(ofdpa_port, 0, htons(vid));
if (err)
- ofdpa_port_vlan(ofdpa_port, trans,
+ ofdpa_port_vlan(ofdpa_port,
OFDPA_OP_FLAG_REMOVE, vid);
return err;
@@ -2324,13 +2203,13 @@ static int ofdpa_port_vlan_del(struct ofdpa_port *ofdpa_port,
{
int err;
- err = ofdpa_port_router_mac(ofdpa_port, NULL,
- OFDPA_OP_FLAG_REMOVE, htons(vid));
+ err = ofdpa_port_router_mac(ofdpa_port, OFDPA_OP_FLAG_REMOVE,
+ htons(vid));
if (err)
return err;
- return ofdpa_port_vlan(ofdpa_port, NULL,
- OFDPA_OP_FLAG_REMOVE, vid);
+ return ofdpa_port_vlan(ofdpa_port, OFDPA_OP_FLAG_REMOVE,
+ vid);
}
static struct ofdpa_internal_vlan_tbl_entry *
@@ -2389,10 +2268,9 @@ found:
return found->vlan_id;
}
-static int ofdpa_port_fib_ipv4(struct ofdpa_port *ofdpa_port,
- struct switchdev_trans *trans, __be32 dst,
- int dst_len, struct fib_info *fi,
- u32 tb_id, int flags)
+static int ofdpa_port_fib_ipv4(struct ofdpa_port *ofdpa_port, __be32 dst,
+ int dst_len, struct fib_info *fi, u32 tb_id,
+ int flags)
{
const struct fib_nh *nh;
__be16 eth_type = htons(ETH_P_IP);
@@ -2414,7 +2292,7 @@ static int ofdpa_port_fib_ipv4(struct ofdpa_port *ofdpa_port,
has_gw = !!nh->nh_gw;
if (has_gw && nh_on_port) {
- err = ofdpa_port_ipv4_nh(ofdpa_port, trans, flags,
+ err = ofdpa_port_ipv4_nh(ofdpa_port, flags,
nh->nh_gw, &index);
if (err)
return err;
@@ -2425,7 +2303,7 @@ static int ofdpa_port_fib_ipv4(struct ofdpa_port *ofdpa_port,
group_id = ROCKER_GROUP_L2_INTERFACE(internal_vlan_id, 0);
}
- err = ofdpa_flow_tbl_ucast4_routing(ofdpa_port, trans, eth_type, dst,
+ err = ofdpa_flow_tbl_ucast4_routing(ofdpa_port, eth_type, dst,
dst_mask, priority, goto_tbl,
group_id, fi, flags);
if (err)
@@ -2550,7 +2428,7 @@ static int ofdpa_port_pre_init(struct rocker_port *rocker_port)
ofdpa_port->rocker_port = rocker_port;
ofdpa_port->dev = rocker_port->dev;
ofdpa_port->pport = rocker_port->pport;
- ofdpa_port->brport_flags = BR_LEARNING | BR_LEARNING_SYNC;
+ ofdpa_port->brport_flags = BR_LEARNING;
ofdpa_port->ageing_time = BR_DEFAULT_AGEING_TIME;
return 0;
}
@@ -2563,7 +2441,7 @@ static int ofdpa_port_init(struct rocker_port *rocker_port)
rocker_port_set_learning(rocker_port,
!!(ofdpa_port->brport_flags & BR_LEARNING));
- err = ofdpa_port_ig_tbl(ofdpa_port, NULL, 0);
+ err = ofdpa_port_ig_tbl(ofdpa_port, 0);
if (err) {
netdev_err(ofdpa_port->dev, "install ig port table failed\n");
return err;
@@ -2573,7 +2451,7 @@ static int ofdpa_port_init(struct rocker_port *rocker_port)
ofdpa_port_internal_vlan_id_get(ofdpa_port,
ofdpa_port->dev->ifindex);
- err = ofdpa_port_vlan_add(ofdpa_port, NULL, OFDPA_UNTAGGED_VID, 0);
+ err = ofdpa_port_vlan_add(ofdpa_port, OFDPA_UNTAGGED_VID, 0);
if (err) {
netdev_err(ofdpa_port->dev, "install untagged VLAN failed\n");
goto err_untagged_vlan;
@@ -2581,7 +2459,7 @@ static int ofdpa_port_init(struct rocker_port *rocker_port)
return 0;
err_untagged_vlan:
- ofdpa_port_ig_tbl(ofdpa_port, NULL, OFDPA_OP_FLAG_REMOVE);
+ ofdpa_port_ig_tbl(ofdpa_port, OFDPA_OP_FLAG_REMOVE);
return err;
}
@@ -2589,7 +2467,7 @@ static void ofdpa_port_fini(struct rocker_port *rocker_port)
{
struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
- ofdpa_port_ig_tbl(ofdpa_port, NULL, OFDPA_OP_FLAG_REMOVE);
+ ofdpa_port_ig_tbl(ofdpa_port, OFDPA_OP_FLAG_REMOVE);
}
static int ofdpa_port_open(struct rocker_port *rocker_port)
@@ -2607,12 +2485,11 @@ static void ofdpa_port_stop(struct rocker_port *rocker_port)
}
static int ofdpa_port_attr_stp_state_set(struct rocker_port *rocker_port,
- u8 state,
- struct switchdev_trans *trans)
+ u8 state)
{
struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
- return ofdpa_port_stp_update(ofdpa_port, trans, 0, state);
+ return ofdpa_port_stp_update(ofdpa_port, 0, state);
}
static int ofdpa_port_attr_bridge_flags_set(struct rocker_port *rocker_port,
@@ -2647,6 +2524,16 @@ ofdpa_port_attr_bridge_flags_get(const struct rocker_port *rocker_port,
}
static int
+ofdpa_port_attr_bridge_flags_support_get(const struct rocker_port *rocker_port,
+ unsigned long *p_brport_flags_support)
+{
+ *p_brport_flags_support = BR_LEARNING;
+ return 0;
+}
+
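
SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS_SUPPORT lets the bridge layer query which brport flags a port can actually offload; ofdpa now advertises only BR_LEARNING, since learned-address sync no longer rides on BR_LEARNING_SYNC but on the FDB notifiers added in rocker_main.c. A hedged sketch of a caller's view, with hypothetical variable names:

	unsigned long support;
	int err;

	err = rocker_world_port_attr_bridge_flags_support_get(rocker_port,
							      &support);
	if (err)
		return err;
	if (wanted_flags & ~support)
		return -EINVAL;	/* a requested flag is not offloadable */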
+static int
ofdpa_port_attr_bridge_ageing_time_set(struct rocker_port *rocker_port,
u32 ageing_time,
struct switchdev_trans *trans)
@@ -2665,15 +2552,14 @@ ofdpa_port_attr_bridge_ageing_time_set(struct rocker_port *rocker_port,
}
static int ofdpa_port_obj_vlan_add(struct rocker_port *rocker_port,
- const struct switchdev_obj_port_vlan *vlan,
- struct switchdev_trans *trans)
+ const struct switchdev_obj_port_vlan *vlan)
{
struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
u16 vid;
int err;
for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
- err = ofdpa_port_vlan_add(ofdpa_port, trans, vid, vlan->flags);
+ err = ofdpa_port_vlan_add(ofdpa_port, vid, vlan->flags);
if (err)
return err;
}
@@ -2697,82 +2583,29 @@ static int ofdpa_port_obj_vlan_del(struct rocker_port *rocker_port,
return 0;
}
-static int ofdpa_port_obj_vlan_dump(const struct rocker_port *rocker_port,
- struct switchdev_obj_port_vlan *vlan,
- switchdev_obj_dump_cb_t *cb)
-{
- const struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
- u16 vid;
- int err = 0;
-
- for (vid = 1; vid < VLAN_N_VID; vid++) {
- if (!test_bit(vid, ofdpa_port->vlan_bitmap))
- continue;
- vlan->flags = 0;
- if (ofdpa_vlan_id_is_internal(htons(vid)))
- vlan->flags |= BRIDGE_VLAN_INFO_PVID;
- vlan->vid_begin = vlan->vid_end = vid;
- err = cb(&vlan->obj);
- if (err)
- break;
- }
-
- return err;
-}
-
static int ofdpa_port_obj_fdb_add(struct rocker_port *rocker_port,
- const struct switchdev_obj_port_fdb *fdb,
- struct switchdev_trans *trans)
+ u16 vid, const unsigned char *addr)
{
struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
- __be16 vlan_id = ofdpa_port_vid_to_vlan(ofdpa_port, fdb->vid, NULL);
+ __be16 vlan_id = ofdpa_port_vid_to_vlan(ofdpa_port, vid, NULL);
if (!ofdpa_port_is_bridged(ofdpa_port))
return -EINVAL;
- return ofdpa_port_fdb(ofdpa_port, trans, fdb->addr, vlan_id, 0);
+ return ofdpa_port_fdb(ofdpa_port, addr, vlan_id, 0);
}
static int ofdpa_port_obj_fdb_del(struct rocker_port *rocker_port,
- const struct switchdev_obj_port_fdb *fdb)
+ u16 vid, const unsigned char *addr)
{
struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
- __be16 vlan_id = ofdpa_port_vid_to_vlan(ofdpa_port, fdb->vid, NULL);
+ __be16 vlan_id = ofdpa_port_vid_to_vlan(ofdpa_port, vid, NULL);
int flags = OFDPA_OP_FLAG_REMOVE;
if (!ofdpa_port_is_bridged(ofdpa_port))
return -EINVAL;
- return ofdpa_port_fdb(ofdpa_port, NULL, fdb->addr, vlan_id, flags);
-}
-
-static int ofdpa_port_obj_fdb_dump(const struct rocker_port *rocker_port,
- struct switchdev_obj_port_fdb *fdb,
- switchdev_obj_dump_cb_t *cb)
-{
- const struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
- struct ofdpa *ofdpa = ofdpa_port->ofdpa;
- struct ofdpa_fdb_tbl_entry *found;
- struct hlist_node *tmp;
- unsigned long lock_flags;
- int bkt;
- int err = 0;
-
- spin_lock_irqsave(&ofdpa->fdb_tbl_lock, lock_flags);
- hash_for_each_safe(ofdpa->fdb_tbl, bkt, tmp, found, entry) {
- if (found->key.ofdpa_port != ofdpa_port)
- continue;
- ether_addr_copy(fdb->addr, found->key.addr);
- fdb->ndm_state = NUD_REACHABLE;
- fdb->vid = ofdpa_port_vlan_to_vid(ofdpa_port,
- found->key.vlan_id);
- err = cb(&fdb->obj);
- if (err)
- break;
- }
- spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, lock_flags);
-
- return err;
+ return ofdpa_port_fdb(ofdpa_port, addr, vlan_id, flags);
}
static int ofdpa_port_bridge_join(struct ofdpa_port *ofdpa_port,
@@ -2797,7 +2630,7 @@ static int ofdpa_port_bridge_join(struct ofdpa_port *ofdpa_port,
ofdpa_port->bridge_dev = bridge;
- return ofdpa_port_vlan_add(ofdpa_port, NULL, OFDPA_UNTAGGED_VID, 0);
+ return ofdpa_port_vlan_add(ofdpa_port, OFDPA_UNTAGGED_VID, 0);
}
static int ofdpa_port_bridge_leave(struct ofdpa_port *ofdpa_port)
@@ -2816,7 +2649,7 @@ static int ofdpa_port_bridge_leave(struct ofdpa_port *ofdpa_port)
ofdpa_port->bridge_dev = NULL;
- err = ofdpa_port_vlan_add(ofdpa_port, NULL, OFDPA_UNTAGGED_VID, 0);
+ err = ofdpa_port_vlan_add(ofdpa_port, OFDPA_UNTAGGED_VID, 0);
if (err)
return err;
@@ -2875,7 +2708,7 @@ static int ofdpa_port_neigh_update(struct rocker_port *rocker_port,
OFDPA_OP_FLAG_NOWAIT;
__be32 ip_addr = *(__be32 *) n->primary_key;
- return ofdpa_port_ipv4_neigh(ofdpa_port, NULL, flags, ip_addr, n->ha);
+ return ofdpa_port_ipv4_neigh(ofdpa_port, flags, ip_addr, n->ha);
}
static int ofdpa_port_neigh_destroy(struct rocker_port *rocker_port,
@@ -2885,7 +2718,7 @@ static int ofdpa_port_neigh_destroy(struct rocker_port *rocker_port,
int flags = OFDPA_OP_FLAG_REMOVE | OFDPA_OP_FLAG_NOWAIT;
__be32 ip_addr = *(__be32 *) n->primary_key;
- return ofdpa_port_ipv4_neigh(ofdpa_port, NULL, flags, ip_addr, n->ha);
+ return ofdpa_port_ipv4_neigh(ofdpa_port, flags, ip_addr, n->ha);
}
static int ofdpa_port_ev_mac_vlan_seen(struct rocker_port *rocker_port,
@@ -2899,7 +2732,7 @@ static int ofdpa_port_ev_mac_vlan_seen(struct rocker_port *rocker_port,
ofdpa_port->stp_state != BR_STATE_FORWARDING)
return 0;
- return ofdpa_port_fdb(ofdpa_port, NULL, addr, vlan_id, flags);
+ return ofdpa_port_fdb(ofdpa_port, addr, vlan_id, flags);
}
static struct ofdpa_port *ofdpa_port_dev_lower_find(struct net_device *dev,
@@ -2923,7 +2756,7 @@ static int ofdpa_fib4_add(struct rocker *rocker,
ofdpa_port = ofdpa_port_dev_lower_find(fen_info->fi->fib_dev, rocker);
if (!ofdpa_port)
return 0;
- err = ofdpa_port_fib_ipv4(ofdpa_port, NULL, htonl(fen_info->dst),
+ err = ofdpa_port_fib_ipv4(ofdpa_port, htonl(fen_info->dst),
fen_info->dst_len, fen_info->fi,
fen_info->tb_id, 0);
if (err)
@@ -2944,7 +2777,7 @@ static int ofdpa_fib4_del(struct rocker *rocker,
if (!ofdpa_port)
return 0;
fib_info_offload_dec(fen_info->fi);
- return ofdpa_port_fib_ipv4(ofdpa_port, NULL, htonl(fen_info->dst),
+ return ofdpa_port_fib_ipv4(ofdpa_port, htonl(fen_info->dst),
fen_info->dst_len, fen_info->fi,
fen_info->tb_id, OFDPA_OP_FLAG_REMOVE);
}
@@ -2971,7 +2804,7 @@ static void ofdpa_fib4_abort(struct rocker *rocker)
if (!ofdpa_port)
continue;
fib_info_offload_dec(flow_entry->fi);
- ofdpa_flow_tbl_del(ofdpa_port, NULL, OFDPA_OP_FLAG_REMOVE,
+ ofdpa_flow_tbl_del(ofdpa_port, OFDPA_OP_FLAG_REMOVE,
flow_entry);
}
spin_unlock_irqrestore(&ofdpa->flow_tbl_lock, flags);
@@ -2993,13 +2826,12 @@ struct rocker_world_ops rocker_ofdpa_ops = {
.port_attr_stp_state_set = ofdpa_port_attr_stp_state_set,
.port_attr_bridge_flags_set = ofdpa_port_attr_bridge_flags_set,
.port_attr_bridge_flags_get = ofdpa_port_attr_bridge_flags_get,
+ .port_attr_bridge_flags_support_get = ofdpa_port_attr_bridge_flags_support_get,
.port_attr_bridge_ageing_time_set = ofdpa_port_attr_bridge_ageing_time_set,
.port_obj_vlan_add = ofdpa_port_obj_vlan_add,
.port_obj_vlan_del = ofdpa_port_obj_vlan_del,
- .port_obj_vlan_dump = ofdpa_port_obj_vlan_dump,
.port_obj_fdb_add = ofdpa_port_obj_fdb_add,
.port_obj_fdb_del = ofdpa_port_obj_fdb_del,
- .port_obj_fdb_dump = ofdpa_port_obj_fdb_dump,
.port_master_linked = ofdpa_port_master_linked,
.port_master_unlinked = ofdpa_port_master_unlinked,
.port_neigh_update = ofdpa_port_neigh_update,
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
index 1e594351a60f..89831adb8eb7 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
@@ -1418,8 +1418,7 @@ static netdev_tx_t sxgbe_xmit(struct sk_buff *skb, struct net_device *dev)
priv->hw->desc->tx_enable_tstamp(first_desc);
}
- if (!tqueue->hwts_tx_en)
- skb_tx_timestamp(skb);
+ skb_tx_timestamp(skb);
priv->hw->dma->enable_dma_transmission(priv->ioaddr, txq_index);
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 78f9e43420e0..761c518b2f92 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -6069,6 +6069,7 @@ static int efx_ef10_ptp_set_ts_config(struct efx_nic *efx,
case HWTSTAMP_FILTER_PTP_V2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ case HWTSTAMP_FILTER_NTP_ALL:
init->rx_filter = HWTSTAMP_FILTER_ALL;
rc = efx_ptp_change_mode(efx, true, 0);
if (!rc)
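HWTSTAMP_FILTER_NTP_ALL is newly accepted here; ef10 cannot filter NTP specifically, so it widens the request to HWTSTAMP_FILTER_ALL and reports that back. A hedged userspace sketch of requesting the new filter through the standard SIOCSHWTSTAMP ioctl (interface handling is illustrative):

#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

/* Request timestamps for all NTP packets on 'ifname'. The driver may
 * widen this to HWTSTAMP_FILTER_ALL, which it reports back in
 * cfg.rx_filter after the ioctl succeeds.
 */
static int request_ntp_timestamps(int sock, const char *ifname)
{
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_NTP_ALL,
	};
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&cfg;

	return ioctl(sock, SIOCSHWTSTAMP, &ifr);
}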
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index a0c52e328102..fcea9371ab7f 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -32,8 +32,8 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
struct net_device *net_dev);
netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
-int efx_setup_tc(struct net_device *net_dev, u32 handle, __be16 proto,
- struct tc_to_netdev *tc);
+int efx_setup_tc(struct net_device *net_dev, u32 handle, u32 chain_index,
+ __be16 proto, struct tc_to_netdev *tc);
unsigned int efx_tx_max_skb_descs(struct efx_nic *efx);
extern unsigned int efx_piobuf_size;
extern bool efx_separate_tx_channels;
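efx_setup_tc() (and its falcon twin below) grows the chain_index parameter that the ndo_setup_tc signature now carries. A driver that only offloads the default chain would typically guard on it up front; a hedged sketch of that pattern (the body beyond the guard is elided):

/* Sketch of a chain-aware ndo_setup_tc entry, assuming the hardware can
 * only offload filters on chain 0.
 */
static int example_setup_tc(struct net_device *net_dev, u32 handle,
			    u32 chain_index, __be16 proto,
			    struct tc_to_netdev *ntc)
{
	if (chain_index)
		return -EOPNOTSUPP;	/* fall back to software processing */

	/* ... dispatch on ntc->type as before ... */
	return 0;
}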
diff --git a/drivers/net/ethernet/sfc/falcon/efx.h b/drivers/net/ethernet/sfc/falcon/efx.h
index c89456fa148c..e5a7a40cc8b6 100644
--- a/drivers/net/ethernet/sfc/falcon/efx.h
+++ b/drivers/net/ethernet/sfc/falcon/efx.h
@@ -32,8 +32,8 @@ netdev_tx_t ef4_hard_start_xmit(struct sk_buff *skb,
struct net_device *net_dev);
netdev_tx_t ef4_enqueue_skb(struct ef4_tx_queue *tx_queue, struct sk_buff *skb);
void ef4_xmit_done(struct ef4_tx_queue *tx_queue, unsigned int index);
-int ef4_setup_tc(struct net_device *net_dev, u32 handle, __be16 proto,
- struct tc_to_netdev *tc);
+int ef4_setup_tc(struct net_device *net_dev, u32 handle, u32 chain_index,
+ __be16 proto, struct tc_to_netdev *tc);
unsigned int ef4_tx_max_skb_descs(struct ef4_nic *efx);
extern bool ef4_separate_tx_channels;
diff --git a/drivers/net/ethernet/sfc/falcon/selftest.c b/drivers/net/ethernet/sfc/falcon/selftest.c
index 92bc34c91547..55c0fbbc4fb8 100644
--- a/drivers/net/ethernet/sfc/falcon/selftest.c
+++ b/drivers/net/ethernet/sfc/falcon/selftest.c
@@ -431,8 +431,7 @@ static int ef4_begin_loopback(struct ef4_tx_queue *tx_queue)
/* Copy the payload in, incrementing the source address to
* exercise the rss vectors */
- payload = ((struct ef4_loopback_payload *)
- skb_put(skb, sizeof(state->payload)));
+ payload = skb_put(skb, sizeof(state->payload));
memcpy(payload, &state->payload, sizeof(state->payload));
payload->ip.saddr = htonl(INADDR_LOOPBACK | (i << 2));
diff --git a/drivers/net/ethernet/sfc/falcon/tx.c b/drivers/net/ethernet/sfc/falcon/tx.c
index f6daf09b8627..f1520a404ac6 100644
--- a/drivers/net/ethernet/sfc/falcon/tx.c
+++ b/drivers/net/ethernet/sfc/falcon/tx.c
@@ -425,8 +425,8 @@ void ef4_init_tx_queue_core_txq(struct ef4_tx_queue *tx_queue)
efx->n_tx_channels : 0));
}
-int ef4_setup_tc(struct net_device *net_dev, u32 handle, __be16 proto,
- struct tc_to_netdev *ntc)
+int ef4_setup_tc(struct net_device *net_dev, u32 handle, u32 chain_index,
+ __be16 proto, struct tc_to_netdev *ntc)
{
struct ef4_nic *efx = netdev_priv(net_dev);
struct ef4_channel *channel;
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index b9422450deb8..3df872f56289 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -1301,7 +1301,7 @@ static void efx_mcdi_abandon(struct efx_nic *efx)
efx_schedule_reset(efx, RESET_TYPE_MCDI_TIMEOUT);
}
-/* Called from falcon_process_eventq for MCDI events */
+/* Called from efx_farch_ev_process and efx_ef10_ev_process for MCDI events */
void efx_mcdi_process_event(struct efx_channel *channel,
efx_qword_t *event)
{
@@ -1389,8 +1389,9 @@ void efx_mcdi_process_event(struct efx_channel *channel,
MCDI_EVENT_FIELD(*event, PROXY_RESPONSE_RC));
break;
default:
- netif_err(efx, hw, efx->net_dev, "Unknown MCDI event 0x%x\n",
- code);
+ netif_err(efx, hw, efx->net_dev,
+ "Unknown MCDI event " EFX_QWORD_FMT "\n",
+ EFX_QWORD_VAL(*event));
}
}
diff --git a/drivers/net/ethernet/sfc/selftest.c b/drivers/net/ethernet/sfc/selftest.c
index dab286a337a6..f6936949fc85 100644
--- a/drivers/net/ethernet/sfc/selftest.c
+++ b/drivers/net/ethernet/sfc/selftest.c
@@ -431,8 +431,7 @@ static int efx_begin_loopback(struct efx_tx_queue *tx_queue)
/* Copy the payload in, incrementing the source address to
* exercise the rss vectors */
- payload = ((struct efx_loopback_payload *)
- skb_put(skb, sizeof(state->payload)));
+ payload = skb_put(skb, sizeof(state->payload));
memcpy(payload, &state->payload, sizeof(state->payload));
payload->ip.saddr = htonl(INADDR_LOOPBACK | (i << 2));
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 3bdf87f31087..02d41eb4a8e9 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -653,8 +653,8 @@ void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue)
efx->n_tx_channels : 0));
}
-int efx_setup_tc(struct net_device *net_dev, u32 handle, __be16 proto,
- struct tc_to_netdev *ntc)
+int efx_setup_tc(struct net_device *net_dev, u32 handle, u32 chain_index,
+ __be16 proto, struct tc_to_netdev *ntc)
{
struct efx_nic *efx = netdev_priv(net_dev);
struct efx_channel *channel;
diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c
index 52ead5524de7..b607936e1b3e 100644
--- a/drivers/net/ethernet/sgi/ioc3-eth.c
+++ b/drivers/net/ethernet/sgi/ioc3-eth.c
@@ -1562,13 +1562,12 @@ static int ioc3_get_link_ksettings(struct net_device *dev,
struct ethtool_link_ksettings *cmd)
{
struct ioc3_private *ip = netdev_priv(dev);
- int rc;
spin_lock_irq(&ip->ioc3_lock);
- rc = mii_ethtool_get_link_ksettings(&ip->mii, cmd);
+ mii_ethtool_get_link_ksettings(&ip->mii, cmd);
spin_unlock_irq(&ip->ioc3_lock);
- return rc;
+ return 0;
}
static int ioc3_set_link_ksettings(struct net_device *dev,
diff --git a/drivers/net/ethernet/silan/sc92031.c b/drivers/net/ethernet/silan/sc92031.c
index 751c81848f35..c07fd594fe71 100644
--- a/drivers/net/ethernet/silan/sc92031.c
+++ b/drivers/net/ethernet/silan/sc92031.c
@@ -795,12 +795,12 @@ static void _sc92031_rx_tasklet(struct net_device *dev)
}
if ((rx_ring_offset + pkt_size) > RX_BUF_LEN) {
- memcpy(skb_put(skb, RX_BUF_LEN - rx_ring_offset),
- rx_ring + rx_ring_offset, RX_BUF_LEN - rx_ring_offset);
- memcpy(skb_put(skb, pkt_size - (RX_BUF_LEN - rx_ring_offset)),
- rx_ring, pkt_size - (RX_BUF_LEN - rx_ring_offset));
+ skb_put_data(skb, rx_ring + rx_ring_offset,
+ RX_BUF_LEN - rx_ring_offset);
+ skb_put_data(skb, rx_ring,
+ pkt_size - (RX_BUF_LEN - rx_ring_offset));
} else {
- memcpy(skb_put(skb, pkt_size), rx_ring + rx_ring_offset, pkt_size);
+ skb_put_data(skb, rx_ring + rx_ring_offset, pkt_size);
}
skb->protocol = eth_type_trans(skb, dev);
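These conversions rely on skb_put() now returning void *, which lets skb_put_data() collapse the reserve-then-memcpy idiom into one call. A small sketch of the equivalence ('skb', 'src' and 'len' stand in for the driver's ring-buffer state):

/* The two forms are equivalent; skb_put_data() reserves len bytes and
 * copies src into them in one step.
 */
static void copy_into_skb(struct sk_buff *skb, const void *src,
			  unsigned int len)
{
	/* Old idiom: memcpy(skb_put(skb, len), src, len); */
	skb_put_data(skb, src, len);
}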
diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c
index 02da106c6e04..445109bd6910 100644
--- a/drivers/net/ethernet/sis/sis190.c
+++ b/drivers/net/ethernet/sis/sis190.c
@@ -1739,7 +1739,9 @@ static int sis190_get_link_ksettings(struct net_device *dev,
{
struct sis190_private *tp = netdev_priv(dev);
- return mii_ethtool_get_link_ksettings(&tp->mii_if, cmd);
+ mii_ethtool_get_link_ksettings(&tp->mii_if, cmd);
+
+ return 0;
}
static int sis190_set_link_ksettings(struct net_device *dev,
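mii_ethtool_get_link_ksettings() now returns void, since filling the ksettings cannot fail, so every caller in this series is rewritten to invoke it for its side effect and return 0 itself. The resulting shape, sketched ('example_private' and its mii_if field are placeholders for the driver's own state):

struct example_private {
	struct mii_if_info mii_if;
};

static int example_get_link_ksettings(struct net_device *dev,
				      struct ethtool_link_ksettings *cmd)
{
	struct example_private *tp = netdev_priv(dev);

	mii_ethtool_get_link_ksettings(&tp->mii_if, cmd);	/* void now */

	return 0;	/* success is the only outcome */
}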
diff --git a/drivers/net/ethernet/smsc/epic100.c b/drivers/net/ethernet/smsc/epic100.c
index db6dcb06193d..6a0e1d4b597c 100644
--- a/drivers/net/ethernet/smsc/epic100.c
+++ b/drivers/net/ethernet/smsc/epic100.c
@@ -1391,13 +1391,12 @@ static int netdev_get_link_ksettings(struct net_device *dev,
struct ethtool_link_ksettings *cmd)
{
struct epic_private *np = netdev_priv(dev);
- int rc;
spin_lock_irq(&np->lock);
- rc = mii_ethtool_get_link_ksettings(&np->mii, cmd);
+ mii_ethtool_get_link_ksettings(&np->mii, cmd);
spin_unlock_irq(&np->lock);
- return rc;
+ return 0;
}
static int netdev_set_link_ksettings(struct net_device *dev,
diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c
index 36307d34f641..05157442a980 100644
--- a/drivers/net/ethernet/smsc/smc911x.c
+++ b/drivers/net/ethernet/smsc/smc911x.c
@@ -1450,7 +1450,7 @@ smc911x_ethtool_get_link_ksettings(struct net_device *dev,
struct ethtool_link_ksettings *cmd)
{
struct smc911x_local *lp = netdev_priv(dev);
- int ret, status;
+ int status;
unsigned long flags;
u32 supported;
@@ -1458,7 +1458,7 @@ smc911x_ethtool_get_link_ksettings(struct net_device *dev,
if (lp->phy_type != 0) {
spin_lock_irqsave(&lp->lock, flags);
- ret = mii_ethtool_get_link_ksettings(&lp->mii, cmd);
+ mii_ethtool_get_link_ksettings(&lp->mii, cmd);
spin_unlock_irqrestore(&lp->lock, flags);
} else {
supported = SUPPORTED_10baseT_Half |
@@ -1480,10 +1480,9 @@ smc911x_ethtool_get_link_ksettings(struct net_device *dev,
ethtool_convert_legacy_u32_to_link_mode(
cmd->link_modes.supported, supported);
- ret = 0;
}
- return ret;
+ return 0;
}
static int
diff --git a/drivers/net/ethernet/smsc/smc91c92_cs.c b/drivers/net/ethernet/smsc/smc91c92_cs.c
index 976aa876789a..92c927aec66d 100644
--- a/drivers/net/ethernet/smsc/smc91c92_cs.c
+++ b/drivers/net/ethernet/smsc/smc91c92_cs.c
@@ -1843,8 +1843,8 @@ static int smc_link_ok(struct net_device *dev)
}
}
-static int smc_netdev_get_ecmd(struct net_device *dev,
- struct ethtool_link_ksettings *ecmd)
+static void smc_netdev_get_ecmd(struct net_device *dev,
+ struct ethtool_link_ksettings *ecmd)
{
u16 tmp;
unsigned int ioaddr = dev->base_addr;
@@ -1865,8 +1865,6 @@ static int smc_netdev_get_ecmd(struct net_device *dev,
ethtool_convert_legacy_u32_to_link_mode(ecmd->link_modes.supported,
supported);
-
- return 0;
}
static int smc_netdev_set_ecmd(struct net_device *dev,
@@ -1918,18 +1916,17 @@ static int smc_get_link_ksettings(struct net_device *dev,
struct smc_private *smc = netdev_priv(dev);
unsigned int ioaddr = dev->base_addr;
u16 saved_bank = inw(ioaddr + BANK_SELECT);
- int ret;
unsigned long flags;
spin_lock_irqsave(&smc->lock, flags);
SMC_SELECT_BANK(3);
if (smc->cfg & CFG_MII_SELECT)
- ret = mii_ethtool_get_link_ksettings(&smc->mii_if, ecmd);
+ mii_ethtool_get_link_ksettings(&smc->mii_if, ecmd);
else
- ret = smc_netdev_get_ecmd(dev, ecmd);
+ smc_netdev_get_ecmd(dev, ecmd);
SMC_SELECT_BANK(saved_bank);
spin_unlock_irqrestore(&smc->lock, flags);
- return ret;
+ return 0;
}
static int smc_set_link_ksettings(struct net_device *dev,
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 91e9bd7159ab..080428762858 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -1539,11 +1539,10 @@ smc_ethtool_get_link_ksettings(struct net_device *dev,
struct ethtool_link_ksettings *cmd)
{
struct smc_local *lp = netdev_priv(dev);
- int ret;
if (lp->phy_type != 0) {
spin_lock_irq(&lp->lock);
- ret = mii_ethtool_get_link_ksettings(&lp->mii, cmd);
+ mii_ethtool_get_link_ksettings(&lp->mii, cmd);
spin_unlock_irq(&lp->lock);
} else {
u32 supported = SUPPORTED_10baseT_Half |
@@ -1562,11 +1561,9 @@ smc_ethtool_get_link_ksettings(struct net_device *dev,
ethtool_convert_legacy_u32_to_link_mode(
cmd->link_modes.supported, supported);
-
- ret = 0;
}
- return ret;
+ return 0;
}
static int
@@ -2488,7 +2485,7 @@ static int smc_drv_resume(struct device *dev)
return 0;
}
-static struct dev_pm_ops smc_drv_pm_ops = {
+static const struct dev_pm_ops smc_drv_pm_ops = {
.suspend = smc_drv_suspend,
.resume = smc_drv_resume,
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index cfbe3634dfa1..85c0e41f8021 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -145,6 +145,17 @@ config DWMAC_SUNXI
This selects Allwinner SoC glue layer support for the
stmmac device driver. This driver is used for A20/A31
GMAC ethernet controller.
+
+config DWMAC_SUN8I
+ tristate "Allwinner sun8i GMAC support"
+ default ARCH_SUNXI
+ depends on OF && (ARCH_SUNXI || COMPILE_TEST)
+ ---help---
+ Support for Allwinner H3, A83T and A64 EMAC ethernet controllers.
+
+ This selects Allwinner SoC glue layer support for the
+ stmmac device driver. This driver is used for the H3/A83T/A64
+ EMAC ethernet controllers.
endif
config STMMAC_PCI
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index 700c60336674..fd4937a7fcab 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_DWMAC_SOCFPGA) += dwmac-altr-socfpga.o
obj-$(CONFIG_DWMAC_STI) += dwmac-sti.o
obj-$(CONFIG_DWMAC_STM32) += dwmac-stm32.o
obj-$(CONFIG_DWMAC_SUNXI) += dwmac-sunxi.o
+obj-$(CONFIG_DWMAC_SUN8I) += dwmac-sun8i.o
obj-$(CONFIG_DWMAC_DWC_QOS_ETH) += dwmac-dwc-qos-eth.o
obj-$(CONFIG_DWMAC_GENERIC) += dwmac-generic.o
stmmac-platform-objs:= stmmac_platform.o
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index b7ce3fbb5375..e82b4b70b7be 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -549,9 +549,11 @@ extern const struct stmmac_hwtimestamp stmmac_ptp;
extern const struct stmmac_mode_ops dwmac4_ring_mode_ops;
struct mac_link {
- int port;
- int duplex;
- int speed;
+ u32 speed_mask;
+ u32 speed10;
+ u32 speed100;
+ u32 speed1000;
+ u32 duplex;
};
struct mii_regs {
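The old port/speed bit pair becomes explicit per-speed register values plus a mask covering all of them, so link handling no longer needs per-core conditionals. The intended update sequence, as stmmac_adjust_link uses it later in this patch, sketched:

/* Apply a new link speed to a MAC control register value. 'link' is the
 * mac_link filled in by the core's dwmac*_setup() (or the sun8i glue).
 */
static u32 apply_link_speed(u32 ctrl, const struct mac_link *link, int speed)
{
	ctrl &= ~link->speed_mask;	/* clear every speed-related bit */

	switch (speed) {
	case SPEED_1000:
		ctrl |= link->speed1000;
		break;
	case SPEED_100:
		ctrl |= link->speed100;
		break;
	case SPEED_10:
		ctrl |= link->speed10;
		break;
	}

	return ctrl;
}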
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
new file mode 100644
index 000000000000..6c2d1da05588
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -0,0 +1,1011 @@
+/*
+ * dwmac-sun8i.c - Allwinner sun8i DWMAC specific glue layer
+ *
+ * Copyright (C) 2017 Corentin Labbe <clabbe.montjoie@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/regmap.h>
+#include <linux/stmmac.h>
+
+#include "stmmac.h"
+#include "stmmac_platform.h"
+
+/* General notes on dwmac-sun8i:
+ * Locking: no locking is necessary in this file, because all necessary
+ * locking is done in the stmmac core.
+ */
+
+/* struct emac_variant - Describe dwmac-sun8i hardware variant
+ * @default_syscon_value: The default value of the EMAC register in syscon.
+ * This value is used to properly disable the EMAC
+ * and as a known-good starting point in case the
+ * boot process (U-Boot) left the register dirty.
+ * @internal_phy: Does the MAC embed an internal PHY
+ * @support_mii: Does the MAC handle MII
+ * @support_rmii: Does the MAC handle RMII
+ * @support_rgmii: Does the MAC handle RGMII
+ */
+struct emac_variant {
+ u32 default_syscon_value;
+ int internal_phy;
+ bool support_mii;
+ bool support_rmii;
+ bool support_rgmii;
+};
+
+/* struct sunxi_priv_data - hold all sunxi private data
+ * @tx_clk: reference to MAC TX clock
+ * @ephy_clk: reference to the optional EPHY clock for the internal PHY
+ * @regulator: reference to the optional regulator
+ * @rst_ephy: reference to the optional EPHY reset for the internal PHY
+ * @variant: reference to the current board variant
+ * @regmap: regmap for using the syscon
+ * @use_internal_phy: Does the current PHY choice imply using the internal PHY
+ */
+struct sunxi_priv_data {
+ struct clk *tx_clk;
+ struct clk *ephy_clk;
+ struct regulator *regulator;
+ struct reset_control *rst_ephy;
+ const struct emac_variant *variant;
+ struct regmap *regmap;
+ bool use_internal_phy;
+};
+
+static const struct emac_variant emac_variant_h3 = {
+ .default_syscon_value = 0x58000,
+ .internal_phy = PHY_INTERFACE_MODE_MII,
+ .support_mii = true,
+ .support_rmii = true,
+ .support_rgmii = true
+};
+
+static const struct emac_variant emac_variant_v3s = {
+ .default_syscon_value = 0x38000,
+ .internal_phy = PHY_INTERFACE_MODE_MII,
+ .support_mii = true
+};
+
+static const struct emac_variant emac_variant_a83t = {
+ .default_syscon_value = 0,
+ .internal_phy = 0,
+ .support_mii = true,
+ .support_rgmii = true
+};
+
+static const struct emac_variant emac_variant_a64 = {
+ .default_syscon_value = 0,
+ .internal_phy = 0,
+ .support_mii = true,
+ .support_rmii = true,
+ .support_rgmii = true
+};
+
+#define EMAC_BASIC_CTL0 0x00
+#define EMAC_BASIC_CTL1 0x04
+#define EMAC_INT_STA 0x08
+#define EMAC_INT_EN 0x0C
+#define EMAC_TX_CTL0 0x10
+#define EMAC_TX_CTL1 0x14
+#define EMAC_TX_FLOW_CTL 0x1C
+#define EMAC_TX_DESC_LIST 0x20
+#define EMAC_RX_CTL0 0x24
+#define EMAC_RX_CTL1 0x28
+#define EMAC_RX_DESC_LIST 0x34
+#define EMAC_RX_FRM_FLT 0x38
+#define EMAC_MDIO_CMD 0x48
+#define EMAC_MDIO_DATA 0x4C
+#define EMAC_MACADDR_HI(reg) (0x50 + (reg) * 8)
+#define EMAC_MACADDR_LO(reg) (0x54 + (reg) * 8)
+#define EMAC_TX_DMA_STA 0xB0
+#define EMAC_TX_CUR_DESC 0xB4
+#define EMAC_TX_CUR_BUF 0xB8
+#define EMAC_RX_DMA_STA 0xC0
+#define EMAC_RX_CUR_DESC 0xC4
+#define EMAC_RX_CUR_BUF 0xC8
+
+/* Used in EMAC_BASIC_CTL0 */
+#define EMAC_DUPLEX_FULL BIT(0)
+#define EMAC_LOOPBACK BIT(1)
+#define EMAC_SPEED_1000 0
+#define EMAC_SPEED_100 (0x03 << 2)
+#define EMAC_SPEED_10 (0x02 << 2)
+
+/* Used in EMAC_BASIC_CTL1 */
+#define EMAC_BURSTLEN_SHIFT 24
+
+/* Used in EMAC_RX_FRM_FLT */
+#define EMAC_FRM_FLT_RXALL BIT(0)
+#define EMAC_FRM_FLT_CTL BIT(13)
+#define EMAC_FRM_FLT_MULTICAST BIT(16)
+
+/* Used in EMAC_RX_CTL1 */
+#define EMAC_RX_MD BIT(1)
+#define EMAC_RX_TH_MASK GENMASK(5, 4)
+#define EMAC_RX_TH_32 0
+#define EMAC_RX_TH_64 (0x1 << 4)
+#define EMAC_RX_TH_96 (0x2 << 4)
+#define EMAC_RX_TH_128 (0x3 << 4)
+#define EMAC_RX_DMA_EN BIT(30)
+#define EMAC_RX_DMA_START BIT(31)
+
+/* Used in EMAC_TX_CTL1 */
+#define EMAC_TX_MD BIT(1)
+#define EMAC_TX_NEXT_FRM BIT(2)
+#define EMAC_TX_TH_MASK GENMASK(10, 8)
+#define EMAC_TX_TH_64 0
+#define EMAC_TX_TH_128 (0x1 << 8)
+#define EMAC_TX_TH_192 (0x2 << 8)
+#define EMAC_TX_TH_256 (0x3 << 8)
+#define EMAC_TX_DMA_EN BIT(30)
+#define EMAC_TX_DMA_START BIT(31)
+
+/* Used in RX_CTL0 */
+#define EMAC_RX_RECEIVER_EN BIT(31)
+#define EMAC_RX_DO_CRC BIT(27)
+#define EMAC_RX_FLOW_CTL_EN BIT(16)
+
+/* Used in TX_CTL0 */
+#define EMAC_TX_TRANSMITTER_EN BIT(31)
+
+/* Used in EMAC_TX_FLOW_CTL */
+#define EMAC_TX_FLOW_CTL_EN BIT(0)
+
+/* Used in EMAC_INT_STA */
+#define EMAC_TX_INT BIT(0)
+#define EMAC_TX_DMA_STOP_INT BIT(1)
+#define EMAC_TX_BUF_UA_INT BIT(2)
+#define EMAC_TX_TIMEOUT_INT BIT(3)
+#define EMAC_TX_UNDERFLOW_INT BIT(4)
+#define EMAC_TX_EARLY_INT BIT(5)
+#define EMAC_RX_INT BIT(8)
+#define EMAC_RX_BUF_UA_INT BIT(9)
+#define EMAC_RX_DMA_STOP_INT BIT(10)
+#define EMAC_RX_TIMEOUT_INT BIT(11)
+#define EMAC_RX_OVERFLOW_INT BIT(12)
+#define EMAC_RX_EARLY_INT BIT(13)
+#define EMAC_RGMII_STA_INT BIT(16)
+
+#define MAC_ADDR_TYPE_DST BIT(31)
+
+/* H3 specific bits for EPHY */
+#define H3_EPHY_ADDR_SHIFT 20
+#define H3_EPHY_CLK_SEL BIT(18) /* 1: 24MHz, 0: 25MHz */
+#define H3_EPHY_LED_POL BIT(17) /* 1: active low, 0: active high */
+#define H3_EPHY_SHUTDOWN BIT(16) /* 1: shutdown, 0: power up */
+#define H3_EPHY_SELECT BIT(15) /* 1: internal PHY, 0: external PHY */
+
+/* H3/A64 specific bits */
+#define SYSCON_RMII_EN BIT(13) /* 1: enable RMII (overrides EPIT) */
+
+/* Generic system control EMAC_CLK bits */
+#define SYSCON_ETXDC_MASK GENMASK(2, 0)
+#define SYSCON_ETXDC_SHIFT 10
+#define SYSCON_ERXDC_MASK GENMASK(4, 0)
+#define SYSCON_ERXDC_SHIFT 5
+/* EMAC PHY Interface Type */
+#define SYSCON_EPIT BIT(2) /* 1: RGMII, 0: MII */
+#define SYSCON_ETCS_MASK GENMASK(1, 0)
+#define SYSCON_ETCS_MII 0x0
+#define SYSCON_ETCS_EXT_GMII 0x1
+#define SYSCON_ETCS_INT_GMII 0x2
+#define SYSCON_EMAC_REG 0x30
+
+/* sun8i_dwmac_dma_reset() - reset the EMAC
+ * Called from stmmac via stmmac_dma_ops->reset
+ */
+static int sun8i_dwmac_dma_reset(void __iomem *ioaddr)
+{
+ writel(0, ioaddr + EMAC_RX_CTL1);
+ writel(0, ioaddr + EMAC_TX_CTL1);
+ writel(0, ioaddr + EMAC_RX_FRM_FLT);
+ writel(0, ioaddr + EMAC_RX_DESC_LIST);
+ writel(0, ioaddr + EMAC_TX_DESC_LIST);
+ writel(0, ioaddr + EMAC_INT_EN);
+ writel(0x1FFFFFF, ioaddr + EMAC_INT_STA);
+ return 0;
+}
+
+/* sun8i_dwmac_dma_init() - initialize the EMAC
+ * Called from stmmac via stmmac_dma_ops->init
+ */
+static void sun8i_dwmac_dma_init(void __iomem *ioaddr,
+ struct stmmac_dma_cfg *dma_cfg,
+ u32 dma_tx, u32 dma_rx, int atds)
+{
+ /* Write TX and RX descriptors address */
+ writel(dma_rx, ioaddr + EMAC_RX_DESC_LIST);
+ writel(dma_tx, ioaddr + EMAC_TX_DESC_LIST);
+
+ writel(EMAC_RX_INT | EMAC_TX_INT, ioaddr + EMAC_INT_EN);
+ writel(0x1FFFFFF, ioaddr + EMAC_INT_STA);
+}
+
+/* sun8i_dwmac_dump_regs() - Dump EMAC address space
+ * Called from stmmac_dma_ops->dump_regs
+ * Used for ethtool
+ */
+static void sun8i_dwmac_dump_regs(void __iomem *ioaddr, u32 *reg_space)
+{
+ int i;
+
+ for (i = 0; i < 0xC8; i += 4) {
+ if (i == 0x32 || i == 0x3C)
+ continue;
+ reg_space[i / 4] = readl(ioaddr + i);
+ }
+}
+
+/* sun8i_dwmac_dump_mac_regs() - Dump EMAC address space
+ * Called from stmmac_ops->dump_regs
+ * Used for ethtool
+ */
+static void sun8i_dwmac_dump_mac_regs(struct mac_device_info *hw,
+ u32 *reg_space)
+{
+ int i;
+ void __iomem *ioaddr = hw->pcsr;
+
+ for (i = 0; i < 0xC8; i += 4) {
+ if (i == 0x32 || i == 0x3C)
+ continue;
+ reg_space[i / 4] = readl(ioaddr + i);
+ }
+}
+
+static void sun8i_dwmac_enable_dma_irq(void __iomem *ioaddr, u32 chan)
+{
+ writel(EMAC_RX_INT | EMAC_TX_INT, ioaddr + EMAC_INT_EN);
+}
+
+static void sun8i_dwmac_disable_dma_irq(void __iomem *ioaddr, u32 chan)
+{
+ writel(0, ioaddr + EMAC_INT_EN);
+}
+
+static void sun8i_dwmac_dma_start_tx(void __iomem *ioaddr, u32 chan)
+{
+ u32 v;
+
+ v = readl(ioaddr + EMAC_TX_CTL1);
+ v |= EMAC_TX_DMA_START;
+ v |= EMAC_TX_DMA_EN;
+ writel(v, ioaddr + EMAC_TX_CTL1);
+}
+
+static void sun8i_dwmac_enable_dma_transmission(void __iomem *ioaddr)
+{
+ u32 v;
+
+ v = readl(ioaddr + EMAC_TX_CTL1);
+ v |= EMAC_TX_DMA_START;
+ v |= EMAC_TX_DMA_EN;
+ writel(v, ioaddr + EMAC_TX_CTL1);
+}
+
+static void sun8i_dwmac_dma_stop_tx(void __iomem *ioaddr, u32 chan)
+{
+ u32 v;
+
+ v = readl(ioaddr + EMAC_TX_CTL1);
+ v &= ~EMAC_TX_DMA_EN;
+ writel(v, ioaddr + EMAC_TX_CTL1);
+}
+
+static void sun8i_dwmac_dma_start_rx(void __iomem *ioaddr, u32 chan)
+{
+ u32 v;
+
+ v = readl(ioaddr + EMAC_RX_CTL1);
+ v |= EMAC_RX_DMA_START;
+ v |= EMAC_RX_DMA_EN;
+ writel(v, ioaddr + EMAC_RX_CTL1);
+}
+
+static void sun8i_dwmac_dma_stop_rx(void __iomem *ioaddr, u32 chan)
+{
+ u32 v;
+
+ v = readl(ioaddr + EMAC_RX_CTL1);
+ v &= ~EMAC_RX_DMA_EN;
+ writel(v, ioaddr + EMAC_RX_CTL1);
+}
+
+static int sun8i_dwmac_dma_interrupt(void __iomem *ioaddr,
+ struct stmmac_extra_stats *x, u32 chan)
+{
+ u32 v;
+ int ret = 0;
+
+ v = readl(ioaddr + EMAC_INT_STA);
+
+ if (v & EMAC_TX_INT) {
+ ret |= handle_tx;
+ x->tx_normal_irq_n++;
+ }
+
+ if (v & EMAC_TX_DMA_STOP_INT)
+ x->tx_process_stopped_irq++;
+
+ if (v & EMAC_TX_BUF_UA_INT)
+ x->tx_process_stopped_irq++;
+
+ if (v & EMAC_TX_TIMEOUT_INT)
+ ret |= tx_hard_error;
+
+ if (v & EMAC_TX_UNDERFLOW_INT) {
+ ret |= tx_hard_error;
+ x->tx_undeflow_irq++;
+ }
+
+ if (v & EMAC_TX_EARLY_INT)
+ x->tx_early_irq++;
+
+ if (v & EMAC_RX_INT) {
+ ret |= handle_rx;
+ x->rx_normal_irq_n++;
+ }
+
+ if (v & EMAC_RX_BUF_UA_INT)
+ x->rx_buf_unav_irq++;
+
+ if (v & EMAC_RX_DMA_STOP_INT)
+ x->rx_process_stopped_irq++;
+
+ if (v & EMAC_RX_TIMEOUT_INT)
+ ret |= tx_hard_error;
+
+ if (v & EMAC_RX_OVERFLOW_INT) {
+ ret |= tx_hard_error;
+ x->rx_overflow_irq++;
+ }
+
+ if (v & EMAC_RX_EARLY_INT)
+ x->rx_early_irq++;
+
+ if (v & EMAC_RGMII_STA_INT)
+ x->irq_rgmii_n++;
+
+ writel(v, ioaddr + EMAC_INT_STA);
+
+ return ret;
+}
+
+static void sun8i_dwmac_dma_operation_mode(void __iomem *ioaddr, int txmode,
+ int rxmode, int rxfifosz)
+{
+ u32 v;
+
+ v = readl(ioaddr + EMAC_TX_CTL1);
+ if (txmode == SF_DMA_MODE) {
+ v |= EMAC_TX_MD;
+ /* Undocumented bit (called TX_NEXT_FRM in BSP), the original
+ * comment is
+ * "Operating on second frame increase the performance
+ * especially when transmit store-and-forward is used."
+ */
+ v |= EMAC_TX_NEXT_FRM;
+ } else {
+ v &= ~EMAC_TX_MD;
+ v &= ~EMAC_TX_TH_MASK;
+ if (txmode < 64)
+ v |= EMAC_TX_TH_64;
+ else if (txmode < 128)
+ v |= EMAC_TX_TH_128;
+ else if (txmode < 192)
+ v |= EMAC_TX_TH_192;
+ else if (txmode < 256)
+ v |= EMAC_TX_TH_256;
+ }
+ writel(v, ioaddr + EMAC_TX_CTL1);
+
+ v = readl(ioaddr + EMAC_RX_CTL1);
+ if (rxmode == SF_DMA_MODE) {
+ v |= EMAC_RX_MD;
+ } else {
+ v &= ~EMAC_RX_MD;
+ v &= ~EMAC_RX_TH_MASK;
+ if (rxmode < 32)
+ v |= EMAC_RX_TH_32;
+ else if (rxmode < 64)
+ v |= EMAC_RX_TH_64;
+ else if (rxmode < 96)
+ v |= EMAC_RX_TH_96;
+ else if (rxmode < 128)
+ v |= EMAC_RX_TH_128;
+ }
+ writel(v, ioaddr + EMAC_RX_CTL1);
+}
+
+static const struct stmmac_dma_ops sun8i_dwmac_dma_ops = {
+ .reset = sun8i_dwmac_dma_reset,
+ .init = sun8i_dwmac_dma_init,
+ .dump_regs = sun8i_dwmac_dump_regs,
+ .dma_mode = sun8i_dwmac_dma_operation_mode,
+ .enable_dma_transmission = sun8i_dwmac_enable_dma_transmission,
+ .enable_dma_irq = sun8i_dwmac_enable_dma_irq,
+ .disable_dma_irq = sun8i_dwmac_disable_dma_irq,
+ .start_tx = sun8i_dwmac_dma_start_tx,
+ .stop_tx = sun8i_dwmac_dma_stop_tx,
+ .start_rx = sun8i_dwmac_dma_start_rx,
+ .stop_rx = sun8i_dwmac_dma_stop_rx,
+ .dma_interrupt = sun8i_dwmac_dma_interrupt,
+};
+
+static int sun8i_dwmac_init(struct platform_device *pdev, void *priv)
+{
+ struct sunxi_priv_data *gmac = priv;
+ int ret;
+
+ if (gmac->regulator) {
+ ret = regulator_enable(gmac->regulator);
+ if (ret) {
+ dev_err(&pdev->dev, "Fail to enable regulator\n");
+ return ret;
+ }
+ }
+
+ ret = clk_prepare_enable(gmac->tx_clk);
+ if (ret) {
+ if (gmac->regulator)
+ regulator_disable(gmac->regulator);
+ dev_err(&pdev->dev, "Could not enable AHB clock\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void sun8i_dwmac_core_init(struct mac_device_info *hw, int mtu)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 v;
+
+ v = (8 << EMAC_BURSTLEN_SHIFT); /* burst len */
+ writel(v, ioaddr + EMAC_BASIC_CTL1);
+}
+
+static void sun8i_dwmac_set_mac(void __iomem *ioaddr, bool enable)
+{
+ u32 t, r;
+
+ t = readl(ioaddr + EMAC_TX_CTL0);
+ r = readl(ioaddr + EMAC_RX_CTL0);
+ if (enable) {
+ t |= EMAC_TX_TRANSMITTER_EN;
+ r |= EMAC_RX_RECEIVER_EN;
+ } else {
+ t &= ~EMAC_TX_TRANSMITTER_EN;
+ r &= ~EMAC_RX_RECEIVER_EN;
+ }
+ writel(t, ioaddr + EMAC_TX_CTL0);
+ writel(r, ioaddr + EMAC_RX_CTL0);
+}
+
+/* Set MAC address at slot reg_n
+ * All slots > 0 need to be enabled with MAC_ADDR_TYPE_DST.
+ * If addr is NULL, clear the slot.
+ */
+static void sun8i_dwmac_set_umac_addr(struct mac_device_info *hw,
+ unsigned char *addr,
+ unsigned int reg_n)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 v;
+
+ if (!addr) {
+ writel(0, ioaddr + EMAC_MACADDR_HI(reg_n));
+ return;
+ }
+
+ stmmac_set_mac_addr(ioaddr, addr, EMAC_MACADDR_HI(reg_n),
+ EMAC_MACADDR_LO(reg_n));
+ if (reg_n > 0) {
+ v = readl(ioaddr + EMAC_MACADDR_HI(reg_n));
+ v |= MAC_ADDR_TYPE_DST;
+ writel(v, ioaddr + EMAC_MACADDR_HI(reg_n));
+ }
+}
+
+static void sun8i_dwmac_get_umac_addr(struct mac_device_info *hw,
+ unsigned char *addr,
+ unsigned int reg_n)
+{
+ void __iomem *ioaddr = hw->pcsr;
+
+ stmmac_get_mac_addr(ioaddr, addr, EMAC_MACADDR_HI(reg_n),
+ EMAC_MACADDR_LO(reg_n));
+}
+
+/* Caution: this function must return a non-zero value to work */
+static int sun8i_dwmac_rx_ipc_enable(struct mac_device_info *hw)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 v;
+
+ v = readl(ioaddr + EMAC_RX_CTL0);
+ v |= EMAC_RX_DO_CRC;
+ writel(v, ioaddr + EMAC_RX_CTL0);
+
+ return 1;
+}
+
+static void sun8i_dwmac_set_filter(struct mac_device_info *hw,
+ struct net_device *dev)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 v;
+ int i = 1;
+ struct netdev_hw_addr *ha;
+ int macaddrs = netdev_uc_count(dev) + netdev_mc_count(dev) + 1;
+
+ v = EMAC_FRM_FLT_CTL;
+
+ if (dev->flags & IFF_PROMISC) {
+ v = EMAC_FRM_FLT_RXALL;
+ } else if (dev->flags & IFF_ALLMULTI) {
+ v |= EMAC_FRM_FLT_MULTICAST;
+ } else if (macaddrs <= hw->unicast_filter_entries) {
+ if (!netdev_mc_empty(dev)) {
+ netdev_for_each_mc_addr(ha, dev) {
+ sun8i_dwmac_set_umac_addr(hw, ha->addr, i);
+ i++;
+ }
+ }
+ if (!netdev_uc_empty(dev)) {
+ netdev_for_each_uc_addr(ha, dev) {
+ sun8i_dwmac_set_umac_addr(hw, ha->addr, i);
+ i++;
+ }
+ }
+ } else {
+ netdev_info(dev, "Too many address, switching to promiscuous\n");
+ v = EMAC_FRM_FLT_RXALL;
+ }
+
+ /* Disable unused address filter slots */
+ while (i < hw->unicast_filter_entries)
+ sun8i_dwmac_set_umac_addr(hw, NULL, i++);
+
+ writel(v, ioaddr + EMAC_RX_FRM_FLT);
+}
+
+static void sun8i_dwmac_flow_ctrl(struct mac_device_info *hw,
+ unsigned int duplex, unsigned int fc,
+ unsigned int pause_time, u32 tx_cnt)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 v;
+
+ v = readl(ioaddr + EMAC_RX_CTL0);
+ if (fc == FLOW_AUTO)
+ v |= EMAC_RX_FLOW_CTL_EN;
+ else
+ v &= ~EMAC_RX_FLOW_CTL_EN;
+ writel(v, ioaddr + EMAC_RX_CTL0);
+
+ v = readl(ioaddr + EMAC_TX_FLOW_CTL);
+ if (fc == FLOW_AUTO)
+ v |= EMAC_TX_FLOW_CTL_EN;
+ else
+ v &= ~EMAC_TX_FLOW_CTL_EN;
+ writel(v, ioaddr + EMAC_TX_FLOW_CTL);
+}
+
+static int sun8i_dwmac_reset(struct stmmac_priv *priv)
+{
+ u32 v;
+ int err;
+
+ v = readl(priv->ioaddr + EMAC_BASIC_CTL1);
+ writel(v | 0x01, priv->ioaddr + EMAC_BASIC_CTL1);
+
+ /* The timeout was previously set to 10ms, but some boards (OrangePI0)
+ * need more if no cable is plugged in. 100ms seems OK.
+ */
+ err = readl_poll_timeout(priv->ioaddr + EMAC_BASIC_CTL1, v,
+ !(v & 0x01), 100, 100000);
+
+ if (err) {
+ dev_err(priv->device, "EMAC reset timeout\n");
+ return -EFAULT;
+ }
+ return 0;
+}
+
+static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
+{
+ struct sunxi_priv_data *gmac = priv->plat->bsp_priv;
+ struct device_node *node = priv->device->of_node;
+ int ret, phy_interface;
+ u32 reg, val;
+
+ regmap_read(gmac->regmap, SYSCON_EMAC_REG, &val);
+ reg = gmac->variant->default_syscon_value;
+ if (reg != val)
+ dev_warn(priv->device,
+ "Current syscon value is not the default %x (expect %x)\n",
+ val, reg);
+
+ if (gmac->variant->internal_phy) {
+ if (!gmac->use_internal_phy) {
+ /* switch to external PHY interface */
+ reg &= ~H3_EPHY_SELECT;
+ } else {
+ reg |= H3_EPHY_SELECT;
+ reg &= ~H3_EPHY_SHUTDOWN;
+ dev_dbg(priv->device, "Select internal_phy %x\n", reg);
+
+ if (of_property_read_bool(priv->plat->phy_node,
+ "allwinner,leds-active-low"))
+ reg |= H3_EPHY_LED_POL;
+ else
+ reg &= ~H3_EPHY_LED_POL;
+
+ /* Force EPHY xtal frequency to 24MHz. */
+ reg |= H3_EPHY_CLK_SEL;
+
+ ret = of_mdio_parse_addr(priv->device,
+ priv->plat->phy_node);
+ if (ret < 0) {
+ dev_err(priv->device, "Could not parse MDIO addr\n");
+ return ret;
+ }
+ /* of_mdio_parse_addr returns a valid (0 ~ 31) PHY
+ * address. No need to mask it again.
+ */
+ reg |= ret << H3_EPHY_ADDR_SHIFT;
+ }
+ }
+
+ if (!of_property_read_u32(node, "allwinner,tx-delay-ps", &val)) {
+ if (val % 100) {
+ dev_err(priv->device, "tx-delay must be a multiple of 100\n");
+ return -EINVAL;
+ }
+ val /= 100;
+ dev_dbg(priv->device, "set tx-delay to %x\n", val);
+ if (val <= SYSCON_ETXDC_MASK) {
+ reg &= ~(SYSCON_ETXDC_MASK << SYSCON_ETXDC_SHIFT);
+ reg |= (val << SYSCON_ETXDC_SHIFT);
+ } else {
+ dev_err(priv->device, "Invalid TX clock delay: %d\n",
+ val);
+ return -EINVAL;
+ }
+ }
+
+ if (!of_property_read_u32(node, "allwinner,rx-delay-ps", &val)) {
+ if (val % 100) {
+ dev_err(priv->device, "rx-delay must be a multiple of 100\n");
+ return -EINVAL;
+ }
+ val /= 100;
+ dev_dbg(priv->device, "set rx-delay to %x\n", val);
+ if (val <= SYSCON_ERXDC_MASK) {
+ reg &= ~(SYSCON_ERXDC_MASK << SYSCON_ERXDC_SHIFT);
+ reg |= (val << SYSCON_ERXDC_SHIFT);
+ } else {
+ dev_err(priv->device, "Invalid RX clock delay: %d\n",
+ val);
+ return -EINVAL;
+ }
+ }
+
+ /* Clear interface mode bits */
+ reg &= ~(SYSCON_ETCS_MASK | SYSCON_EPIT);
+ if (gmac->variant->support_rmii)
+ reg &= ~SYSCON_RMII_EN;
+
+ phy_interface = priv->plat->interface;
+ /* if PHY is internal, select the mode (xMII) used by the SoC */
+ if (gmac->use_internal_phy)
+ phy_interface = gmac->variant->internal_phy;
+ switch (phy_interface) {
+ case PHY_INTERFACE_MODE_MII:
+ /* default */
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ reg |= SYSCON_EPIT | SYSCON_ETCS_INT_GMII;
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ reg |= SYSCON_RMII_EN | SYSCON_ETCS_EXT_GMII;
+ break;
+ default:
+ dev_err(priv->device, "Unsupported interface mode: %s",
+ phy_modes(priv->plat->interface));
+ return -EINVAL;
+ }
+
+ regmap_write(gmac->regmap, SYSCON_EMAC_REG, reg);
+
+ return 0;
+}
+
+static void sun8i_dwmac_unset_syscon(struct sunxi_priv_data *gmac)
+{
+ u32 reg = gmac->variant->default_syscon_value;
+
+ regmap_write(gmac->regmap, SYSCON_EMAC_REG, reg);
+}
+
+static int sun8i_dwmac_power_internal_phy(struct stmmac_priv *priv)
+{
+ struct sunxi_priv_data *gmac = priv->plat->bsp_priv;
+ int ret;
+
+ if (!gmac->use_internal_phy)
+ return 0;
+
+ ret = clk_prepare_enable(gmac->ephy_clk);
+ if (ret) {
+ dev_err(priv->device, "Cannot enable ephy\n");
+ return ret;
+ }
+
+ /* Make sure the EPHY is properly reset, as U-Boot may leave
+ * it in a deasserted state, in which case resetting the EMAC may fail.
+ */
+ reset_control_assert(gmac->rst_ephy);
+
+ ret = reset_control_deassert(gmac->rst_ephy);
+ if (ret) {
+ dev_err(priv->device, "Cannot deassert ephy\n");
+ clk_disable_unprepare(gmac->ephy_clk);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int sun8i_dwmac_unpower_internal_phy(struct sunxi_priv_data *gmac)
+{
+ if (!gmac->use_internal_phy)
+ return 0;
+
+ clk_disable_unprepare(gmac->ephy_clk);
+ reset_control_assert(gmac->rst_ephy);
+ return 0;
+}
+
+/* sun8i_power_phy() - Activate the PHY:
+ * In case of error, no need to call sun8i_unpower_phy(),
+ * it will be called anyway by sun8i_dwmac_exit()
+ */
+static int sun8i_power_phy(struct stmmac_priv *priv)
+{
+ int ret;
+
+ ret = sun8i_dwmac_power_internal_phy(priv);
+ if (ret)
+ return ret;
+
+ ret = sun8i_dwmac_set_syscon(priv);
+ if (ret)
+ return ret;
+
+ /* After changing the syscon value, the MAC needs a reset, or it will
+ * keep using the last value (and thus the last PHY setting).
+ */
+ ret = sun8i_dwmac_reset(priv);
+ if (ret)
+ return ret;
+ return 0;
+}
+
+static void sun8i_unpower_phy(struct sunxi_priv_data *gmac)
+{
+ sun8i_dwmac_unset_syscon(gmac);
+ sun8i_dwmac_unpower_internal_phy(gmac);
+}
+
+static void sun8i_dwmac_exit(struct platform_device *pdev, void *priv)
+{
+ struct sunxi_priv_data *gmac = priv;
+
+ sun8i_unpower_phy(gmac);
+
+ clk_disable_unprepare(gmac->tx_clk);
+
+ if (gmac->regulator)
+ regulator_disable(gmac->regulator);
+}
+
+static const struct stmmac_ops sun8i_dwmac_ops = {
+ .core_init = sun8i_dwmac_core_init,
+ .set_mac = sun8i_dwmac_set_mac,
+ .dump_regs = sun8i_dwmac_dump_mac_regs,
+ .rx_ipc = sun8i_dwmac_rx_ipc_enable,
+ .set_filter = sun8i_dwmac_set_filter,
+ .flow_ctrl = sun8i_dwmac_flow_ctrl,
+ .set_umac_addr = sun8i_dwmac_set_umac_addr,
+ .get_umac_addr = sun8i_dwmac_get_umac_addr,
+};
+
+static struct mac_device_info *sun8i_dwmac_setup(void *ppriv)
+{
+ struct mac_device_info *mac;
+ struct stmmac_priv *priv = ppriv;
+ int ret;
+
+ mac = devm_kzalloc(priv->device, sizeof(*mac), GFP_KERNEL);
+ if (!mac)
+ return NULL;
+
+ ret = sun8i_power_phy(priv);
+ if (ret)
+ return NULL;
+
+ mac->pcsr = priv->ioaddr;
+ mac->mac = &sun8i_dwmac_ops;
+ mac->dma = &sun8i_dwmac_dma_ops;
+
+ /* The loopback bit seems to be re-set whenever the link changes,
+ * so simply mask it out each time.
+ * Speeds 10/100/1000 are encoded in bits 3:2.
+ */
+ mac->link.speed_mask = GENMASK(3, 2) | EMAC_LOOPBACK;
+ mac->link.speed10 = EMAC_SPEED_10;
+ mac->link.speed100 = EMAC_SPEED_100;
+ mac->link.speed1000 = EMAC_SPEED_1000;
+ mac->link.duplex = EMAC_DUPLEX_FULL;
+ mac->mii.addr = EMAC_MDIO_CMD;
+ mac->mii.data = EMAC_MDIO_DATA;
+ mac->mii.reg_shift = 4;
+ mac->mii.reg_mask = GENMASK(8, 4);
+ mac->mii.addr_shift = 12;
+ mac->mii.addr_mask = GENMASK(16, 12);
+ mac->mii.clk_csr_shift = 20;
+ mac->mii.clk_csr_mask = GENMASK(22, 20);
+ mac->unicast_filter_entries = 8;
+
+ /* Synopsys Id is not available */
+ priv->synopsys_id = 0;
+
+ return mac;
+}
+
+static int sun8i_dwmac_probe(struct platform_device *pdev)
+{
+ struct plat_stmmacenet_data *plat_dat;
+ struct stmmac_resources stmmac_res;
+ struct sunxi_priv_data *gmac;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+ if (ret)
+ return ret;
+
+ plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+ if (IS_ERR(plat_dat))
+ return PTR_ERR(plat_dat);
+
+ gmac = devm_kzalloc(dev, sizeof(*gmac), GFP_KERNEL);
+ if (!gmac)
+ return -ENOMEM;
+
+ gmac->variant = of_device_get_match_data(&pdev->dev);
+ if (!gmac->variant) {
+ dev_err(&pdev->dev, "Missing dwmac-sun8i variant\n");
+ return -EINVAL;
+ }
+
+ gmac->tx_clk = devm_clk_get(dev, "stmmaceth");
+ if (IS_ERR(gmac->tx_clk)) {
+ dev_err(dev, "Could not get TX clock\n");
+ return PTR_ERR(gmac->tx_clk);
+ }
+
+ /* Optional regulator for PHY */
+ gmac->regulator = devm_regulator_get_optional(dev, "phy");
+ if (IS_ERR(gmac->regulator)) {
+ if (PTR_ERR(gmac->regulator) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ dev_info(dev, "No regulator found\n");
+ gmac->regulator = NULL;
+ }
+
+ gmac->regmap = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+ "syscon");
+ if (IS_ERR(gmac->regmap)) {
+ ret = PTR_ERR(gmac->regmap);
+ dev_err(&pdev->dev, "Unable to map syscon: %d\n", ret);
+ return ret;
+ }
+
+ plat_dat->interface = of_get_phy_mode(dev->of_node);
+ if (plat_dat->interface == PHY_INTERFACE_MODE_INTERNAL) {
+ dev_info(&pdev->dev, "Will use internal PHY\n");
+ gmac->use_internal_phy = true;
+ gmac->ephy_clk = of_clk_get(plat_dat->phy_node, 0);
+ if (IS_ERR(gmac->ephy_clk)) {
+ ret = PTR_ERR(gmac->ephy_clk);
+ dev_err(&pdev->dev, "Cannot get EPHY clock: %d\n", ret);
+ return -EINVAL;
+ }
+
+ gmac->rst_ephy = of_reset_control_get(plat_dat->phy_node, NULL);
+ if (IS_ERR(gmac->rst_ephy)) {
+ ret = PTR_ERR(gmac->rst_ephy);
+ if (ret == -EPROBE_DEFER)
+ return ret;
+ dev_err(&pdev->dev, "No EPHY reset control found %d\n",
+ ret);
+ return -EINVAL;
+ }
+ } else {
+ dev_info(&pdev->dev, "Will use external PHY\n");
+ gmac->use_internal_phy = false;
+ }
+
+ /* Platform data specifying hardware features and callbacks.
+ * Hardware features were copied from Allwinner drivers.
+ */
+ plat_dat->rx_coe = STMMAC_RX_COE_TYPE2;
+ plat_dat->tx_coe = 1;
+ plat_dat->has_sun8i = true;
+ plat_dat->bsp_priv = gmac;
+ plat_dat->init = sun8i_dwmac_init;
+ plat_dat->exit = sun8i_dwmac_exit;
+ plat_dat->setup = sun8i_dwmac_setup;
+
+ ret = sun8i_dwmac_init(pdev, plat_dat->bsp_priv);
+ if (ret)
+ return ret;
+
+ ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+ if (ret)
+ sun8i_dwmac_exit(pdev, plat_dat->bsp_priv);
+
+ return ret;
+}
+
+static const struct of_device_id sun8i_dwmac_match[] = {
+ { .compatible = "allwinner,sun8i-h3-emac",
+ .data = &emac_variant_h3 },
+ { .compatible = "allwinner,sun8i-v3s-emac",
+ .data = &emac_variant_v3s },
+ { .compatible = "allwinner,sun8i-a83t-emac",
+ .data = &emac_variant_a83t },
+ { .compatible = "allwinner,sun50i-a64-emac",
+ .data = &emac_variant_a64 },
+ { }
+};
+MODULE_DEVICE_TABLE(of, sun8i_dwmac_match);
+
+static struct platform_driver sun8i_dwmac_driver = {
+ .probe = sun8i_dwmac_probe,
+ .remove = stmmac_pltfr_remove,
+ .driver = {
+ .name = "dwmac-sun8i",
+ .pm = &stmmac_pltfr_pm_ops,
+ .of_match_table = sun8i_dwmac_match,
+ },
+};
+module_platform_driver(sun8i_dwmac_driver);
+
+MODULE_AUTHOR("Corentin Labbe <clabbe.montjoie@gmail.com>");
+MODULE_DESCRIPTION("Allwinner sun8i DWMAC specific glue layer");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index f3d9305e5f70..8a86340ff2d3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -45,15 +45,17 @@ static void dwmac1000_core_init(struct mac_device_info *hw, int mtu)
if (hw->ps) {
value |= GMAC_CONTROL_TE;
- if (hw->ps == SPEED_1000) {
- value &= ~GMAC_CONTROL_PS;
- } else {
- value |= GMAC_CONTROL_PS;
-
- if (hw->ps == SPEED_10)
- value &= ~GMAC_CONTROL_FES;
- else
- value |= GMAC_CONTROL_FES;
+ value &= ~hw->link.speed_mask;
+ switch (hw->ps) {
+ case SPEED_1000:
+ value |= hw->link.speed1000;
+ break;
+ case SPEED_100:
+ value |= hw->link.speed100;
+ break;
+ case SPEED_10:
+ value |= hw->link.speed10;
+ break;
}
}
@@ -531,9 +533,11 @@ struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr, int mcbins,
mac->mac = &dwmac1000_ops;
mac->dma = &dwmac1000_dma_ops;
- mac->link.port = GMAC_CONTROL_PS;
mac->link.duplex = GMAC_CONTROL_DM;
- mac->link.speed = GMAC_CONTROL_FES;
+ mac->link.speed10 = GMAC_CONTROL_PS;
+ mac->link.speed100 = GMAC_CONTROL_PS | GMAC_CONTROL_FES;
+ mac->link.speed1000 = 0;
+ mac->link.speed_mask = GMAC_CONTROL_PS | GMAC_CONTROL_FES;
mac->mii.addr = GMAC_MII_ADDR;
mac->mii.data = GMAC_MII_DATA;
mac->mii.addr_shift = 11;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
index 471a9aa6ac94..22cf6353ba04 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
@@ -205,8 +205,8 @@ static void dwmac1000_dump_dma_regs(void __iomem *ioaddr, u32 *reg_space)
{
int i;
- for (i = 0; i < 22; i++)
- if ((i < 9) || (i > 17))
+ for (i = 0; i < 23; i++)
+ if ((i < 12) || (i > 17))
reg_space[DMA_BUS_MODE / 4 + i] =
readl(ioaddr + DMA_BUS_MODE + i * 4);
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
index 1b3609105484..8ef517356313 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
@@ -175,9 +175,11 @@ struct mac_device_info *dwmac100_setup(void __iomem *ioaddr, int *synopsys_id)
mac->mac = &dwmac100_ops;
mac->dma = &dwmac100_dma_ops;
- mac->link.port = MAC_CONTROL_PS;
mac->link.duplex = MAC_CONTROL_F;
- mac->link.speed = 0;
+ mac->link.speed10 = 0;
+ mac->link.speed100 = 0;
+ mac->link.speed1000 = 0;
+ mac->link.speed_mask = MAC_CONTROL_PS;
mac->mii.addr = MAC_MII_ADDR;
mac->mii.data = MAC_MII_DATA;
mac->mii.addr_shift = 11;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index 48793f2e9307..f233bf8b4ebb 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -35,15 +35,17 @@ static void dwmac4_core_init(struct mac_device_info *hw, int mtu)
if (hw->ps) {
value |= GMAC_CONFIG_TE;
- if (hw->ps == SPEED_1000) {
- value &= ~GMAC_CONFIG_PS;
- } else {
- value |= GMAC_CONFIG_PS;
-
- if (hw->ps == SPEED_10)
- value &= ~GMAC_CONFIG_FES;
- else
- value |= GMAC_CONFIG_FES;
+ value &= ~hw->link.speed_mask;
+ switch (hw->ps) {
+ case SPEED_1000:
+ value |= hw->link.speed1000;
+ break;
+ case SPEED_100:
+ value |= hw->link.speed100;
+ break;
+ case SPEED_10:
+ value |= hw->link.speed10;
+ break;
}
}
@@ -747,9 +749,11 @@ struct mac_device_info *dwmac4_setup(void __iomem *ioaddr, int mcbins,
if (mac->multicast_filter_bins)
mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
- mac->link.port = GMAC_CONFIG_PS;
mac->link.duplex = GMAC_CONFIG_DM;
- mac->link.speed = GMAC_CONFIG_FES;
+ mac->link.speed10 = GMAC_CONFIG_PS;
+ mac->link.speed100 = GMAC_CONFIG_FES | GMAC_CONFIG_PS;
+ mac->link.speed1000 = 0;
+ mac->link.speed_mask = GMAC_CONFIG_FES | GMAC_CONFIG_PS;
mac->mii.addr = GMAC_MDIO_ADDR;
mac->mii.data = GMAC_MDIO_DATA;
mac->mii.addr_shift = 21;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
index eec8463057fd..e84831e1b63b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
@@ -71,9 +71,9 @@ static void dwmac4_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi)
writel(value, ioaddr + DMA_SYS_BUS_MODE);
}
-void dwmac4_dma_init_rx_chan(void __iomem *ioaddr,
- struct stmmac_dma_cfg *dma_cfg,
- u32 dma_rx_phy, u32 chan)
+static void dwmac4_dma_init_rx_chan(void __iomem *ioaddr,
+ struct stmmac_dma_cfg *dma_cfg,
+ u32 dma_rx_phy, u32 chan)
{
u32 value;
u32 rxpbl = dma_cfg->rxpbl ?: dma_cfg->pbl;
@@ -85,9 +85,9 @@ void dwmac4_dma_init_rx_chan(void __iomem *ioaddr,
writel(dma_rx_phy, ioaddr + DMA_CHAN_RX_BASE_ADDR(chan));
}
-void dwmac4_dma_init_tx_chan(void __iomem *ioaddr,
- struct stmmac_dma_cfg *dma_cfg,
- u32 dma_tx_phy, u32 chan)
+static void dwmac4_dma_init_tx_chan(void __iomem *ioaddr,
+ struct stmmac_dma_cfg *dma_cfg,
+ u32 dma_tx_phy, u32 chan)
{
u32 value;
u32 txpbl = dma_cfg->txpbl ?: dma_cfg->pbl;
@@ -99,8 +99,8 @@ void dwmac4_dma_init_tx_chan(void __iomem *ioaddr,
writel(dma_tx_phy, ioaddr + DMA_CHAN_TX_BASE_ADDR(chan));
}
-void dwmac4_dma_init_channel(void __iomem *ioaddr,
- struct stmmac_dma_cfg *dma_cfg, u32 chan)
+static void dwmac4_dma_init_channel(void __iomem *ioaddr,
+ struct stmmac_dma_cfg *dma_cfg, u32 chan)
{
u32 value;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
index 38f94305aab5..67af0bdd7f10 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
@@ -248,6 +248,7 @@ void stmmac_set_mac_addr(void __iomem *ioaddr, u8 addr[6],
data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
writel(data, ioaddr + low);
}
+EXPORT_SYMBOL_GPL(stmmac_set_mac_addr);
/* Enable disable MAC RX/TX */
void stmmac_set_mac(void __iomem *ioaddr, bool enable)
@@ -279,4 +280,4 @@ void stmmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
addr[4] = hi_addr & 0xff;
addr[5] = (hi_addr >> 8) & 0xff;
}
-
+EXPORT_SYMBOL_GPL(stmmac_get_mac_addr);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index 33efe7038cab..a916e13624eb 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -104,7 +104,7 @@ struct stmmac_priv {
/* TX Queue */
struct stmmac_tx_queue tx_queue[MTL_MAX_TX_QUEUES];
- int oldlink;
+ bool oldlink;
int speed;
int oldduplex;
unsigned int flow_ctrl;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index 16808e48ca1c..babb39c646ff 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -29,7 +29,7 @@
#include "stmmac.h"
#include "dwmac_dma.h"
-#define REG_SPACE_SIZE 0x1054
+#define REG_SPACE_SIZE 0x1060
#define MAC100_ETHTOOL_NAME "st_mac100"
#define GMAC_ETHTOOL_NAME "st_gmac"
@@ -273,7 +273,6 @@ static int stmmac_ethtool_get_link_ksettings(struct net_device *dev,
{
struct stmmac_priv *priv = netdev_priv(dev);
struct phy_device *phy = dev->phydev;
- int rc;
if (priv->hw->pcs & STMMAC_PCS_RGMII ||
priv->hw->pcs & STMMAC_PCS_SGMII) {
@@ -364,8 +363,8 @@ static int stmmac_ethtool_get_link_ksettings(struct net_device *dev,
"link speed / duplex setting\n", dev->name);
return -EBUSY;
}
- rc = phy_ethtool_ksettings_get(phy, cmd);
- return rc;
+ phy_ethtool_ksettings_get(phy, cmd);
+ return 0;
}
static int
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 6e4cbc6ce0ef..19bba6281dab 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -235,6 +235,17 @@ static void stmmac_clk_csr_set(struct stmmac_priv *priv)
else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
priv->clk_csr = STMMAC_CSR_250_300M;
}
+
+ if (priv->plat->has_sun8i) {
+ if (clk_rate > 160000000)
+ priv->clk_csr = 0x03;
+ else if (clk_rate > 80000000)
+ priv->clk_csr = 0x02;
+ else if (clk_rate > 40000000)
+ priv->clk_csr = 0x01;
+ else
+ priv->clk_csr = 0;
+ }
}
static void print_pkt(unsigned char *buf, int len)
@@ -653,6 +664,7 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
ptp_over_ethernet = PTP_TCR_TSIPENA;
break;
+ case HWTSTAMP_FILTER_NTP_ALL:
case HWTSTAMP_FILTER_ALL:
/* time stamp any incoming packet */
config.rx_filter = HWTSTAMP_FILTER_ALL;
@@ -783,7 +795,7 @@ static void stmmac_adjust_link(struct net_device *dev)
struct stmmac_priv *priv = netdev_priv(dev);
struct phy_device *phydev = dev->phydev;
unsigned long flags;
- int new_state = 0;
+ bool new_state = false;
if (!phydev)
return;
@@ -796,8 +808,8 @@ static void stmmac_adjust_link(struct net_device *dev)
/* Now we make sure that we can be in full duplex mode.
* If not, we operate in half-duplex mode. */
if (phydev->duplex != priv->oldduplex) {
- new_state = 1;
- if (!(phydev->duplex))
+ new_state = true;
+ if (!phydev->duplex)
ctrl &= ~priv->hw->link.duplex;
else
ctrl |= priv->hw->link.duplex;
@@ -808,30 +820,17 @@ static void stmmac_adjust_link(struct net_device *dev)
stmmac_mac_flow_ctrl(priv, phydev->duplex);
if (phydev->speed != priv->speed) {
- new_state = 1;
+ new_state = true;
+ ctrl &= ~priv->hw->link.speed_mask;
switch (phydev->speed) {
- case 1000:
- if (priv->plat->has_gmac ||
- priv->plat->has_gmac4)
- ctrl &= ~priv->hw->link.port;
+ case SPEED_1000:
+ ctrl |= priv->hw->link.speed1000;
break;
- case 100:
- if (priv->plat->has_gmac ||
- priv->plat->has_gmac4) {
- ctrl |= priv->hw->link.port;
- ctrl |= priv->hw->link.speed;
- } else {
- ctrl &= ~priv->hw->link.port;
- }
+ case SPEED_100:
+ ctrl |= priv->hw->link.speed100;
break;
- case 10:
- if (priv->plat->has_gmac ||
- priv->plat->has_gmac4) {
- ctrl |= priv->hw->link.port;
- ctrl &= ~(priv->hw->link.speed);
- } else {
- ctrl &= ~priv->hw->link.port;
- }
+ case SPEED_10:
+ ctrl |= priv->hw->link.speed10;
break;
default:
netif_warn(priv, link, priv->dev,
@@ -847,12 +846,12 @@ static void stmmac_adjust_link(struct net_device *dev)
writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
if (!priv->oldlink) {
- new_state = 1;
- priv->oldlink = 1;
+ new_state = true;
+ priv->oldlink = true;
}
} else if (priv->oldlink) {
- new_state = 1;
- priv->oldlink = 0;
+ new_state = true;
+ priv->oldlink = false;
priv->speed = SPEED_UNKNOWN;
priv->oldduplex = DUPLEX_UNKNOWN;
}
@@ -915,7 +914,7 @@ static int stmmac_init_phy(struct net_device *dev)
char bus_id[MII_BUS_ID_SIZE];
int interface = priv->plat->interface;
int max_speed = priv->plat->max_speed;
- priv->oldlink = 0;
+ priv->oldlink = false;
priv->speed = SPEED_UNKNOWN;
priv->oldduplex = DUPLEX_UNKNOWN;
@@ -2895,8 +2894,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
priv->xstats.tx_set_ic_bit++;
}
- if (!priv->hwts_tx_en)
- skb_tx_timestamp(skb);
+ skb_tx_timestamp(skb);
if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
priv->hwts_tx_en)) {
@@ -2974,7 +2972,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
/* Manage oversized TCP frames for GMAC4 device */
if (skb_is_gso(skb) && priv->tso) {
- if (ip_hdr(skb)->protocol == IPPROTO_TCP)
+ if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
return stmmac_tso_xmit(skb, dev);
}
@@ -3105,8 +3103,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
priv->xstats.tx_set_ic_bit++;
}
- if (!priv->hwts_tx_en)
- skb_tx_timestamp(skb);
+ skb_tx_timestamp(skb);
/* Ready to fill the first descriptor and set the OWN bit w/o any
* problems because all the descriptors are actually ready to be
@@ -3969,7 +3966,9 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
struct mac_device_info *mac;
/* Identify the MAC HW device */
- if (priv->plat->has_gmac) {
+ if (priv->plat->setup) {
+ mac = priv->plat->setup(priv);
+ } else if (priv->plat->has_gmac) {
priv->dev->priv_flags |= IFF_UNICAST_FLT;
mac = dwmac1000_setup(priv->ioaddr,
priv->plat->multicast_filter_bins,
@@ -3989,6 +3988,10 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
priv->hw = mac;
+ /* dwmac-sun8i only works in chain mode */
+ if (priv->plat->has_sun8i)
+ chain_mode = 1;
+
/* To use the chained or ring mode */
if (priv->synopsys_id >= DWMAC_CORE_4_00) {
priv->hw->mode = &dwmac4_ring_mode_ops;
@@ -4135,7 +4138,7 @@ int stmmac_dvr_probe(struct device *device,
NETIF_F_RXCSUM;
if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
- ndev->hw_features |= NETIF_F_TSO;
+ ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
priv->tso = true;
dev_info(priv->device, "TSO feature enabled\n");
}
@@ -4314,7 +4317,7 @@ int stmmac_suspend(struct device *dev)
}
spin_unlock_irqrestore(&priv->lock, flags);
- priv->oldlink = 0;
+ priv->oldlink = false;
priv->speed = SPEED_UNKNOWN;
priv->oldduplex = DUPLEX_UNKNOWN;
return 0;
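Several hunks in this series wire up the new HWTSTAMP_FILTER_NTP_ALL rx filter. For context, user space selects an rx filter through the usual SIOCSHWTSTAMP ioctl; the sketch below is illustrative only (the socket, the "eth0" name and the error handling are assumptions, not part of the patch), and a driver may still reject or downgrade the filter, as cpsw does below with -ERANGE.

#include <linux/net_tstamp.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <string.h>
#include <sys/ioctl.h>

/* Minimal sketch: ask the driver to timestamp all NTP packets on
 * "eth0" (hypothetical interface). fd is any open socket; returns
 * the raw ioctl result. */
static int request_ntp_timestamps(int fd)
{
	struct hwtstamp_config cfg = {
		.tx_type = HWTSTAMP_TX_OFF,
		.rx_filter = HWTSTAMP_FILTER_NTP_ALL,
	};
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&cfg;

	return ioctl(fd, SIOCSHWTSTAMP, &ifr);
}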
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index 22f910795be4..8d375e51a526 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -30,42 +30,39 @@
* negative value of the address means that MAC controller is not connected
* with PHY.
*/
-struct stmmac_pci_dmi_data {
- const char *name;
- const char *asset_tag;
+struct stmmac_pci_func_data {
unsigned int func;
int phy_addr;
};
+struct stmmac_pci_dmi_data {
+ const struct stmmac_pci_func_data *func;
+ size_t nfuncs;
+};
+
struct stmmac_pci_info {
- struct pci_dev *pdev;
- int (*setup)(struct plat_stmmacenet_data *plat,
- struct stmmac_pci_info *info);
- struct stmmac_pci_dmi_data *dmi;
+ int (*setup)(struct pci_dev *pdev, struct plat_stmmacenet_data *plat);
};
-static int stmmac_pci_find_phy_addr(struct stmmac_pci_info *info)
+static int stmmac_pci_find_phy_addr(struct pci_dev *pdev,
+ const struct dmi_system_id *dmi_list)
{
- const char *name = dmi_get_system_info(DMI_BOARD_NAME);
- const char *asset_tag = dmi_get_system_info(DMI_BOARD_ASSET_TAG);
- unsigned int func = PCI_FUNC(info->pdev->devfn);
- struct stmmac_pci_dmi_data *dmi;
+ const struct stmmac_pci_func_data *func_data;
+ const struct stmmac_pci_dmi_data *dmi_data;
+ const struct dmi_system_id *dmi_id;
+ int func = PCI_FUNC(pdev->devfn);
+ size_t n;
- /*
- * Galileo boards with old firmware don't support DMI. We always return
- * 1 here, so at least first found MAC controller would be probed.
- */
- if (!name)
- return 1;
-
- for (dmi = info->dmi; dmi->name && *dmi->name; dmi++) {
- if (!strcmp(dmi->name, name) && dmi->func == func) {
- /* If asset tag is provided, match on it as well. */
- if (dmi->asset_tag && strcmp(dmi->asset_tag, asset_tag))
- continue;
- return dmi->phy_addr;
- }
- }
+ dmi_id = dmi_first_match(dmi_list);
+ if (!dmi_id)
+ return -ENODEV;
+
+ dmi_data = dmi_id->driver_data;
+ func_data = dmi_data->func;
+
+ for (n = 0; n < dmi_data->nfuncs; n++, func_data++)
+ if (func_data->func == func)
+ return func_data->phy_addr;
return -ENODEV;
}
@@ -100,7 +97,8 @@ static void common_default_data(struct plat_stmmacenet_data *plat)
plat->rx_queues_cfg[0].pkt_route = 0x0;
}
-static void stmmac_default_data(struct plat_stmmacenet_data *plat)
+static int stmmac_default_data(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat)
{
/* Set common default data first */
common_default_data(plat);
@@ -112,12 +110,77 @@ static void stmmac_default_data(struct plat_stmmacenet_data *plat)
plat->dma_cfg->pbl = 32;
plat->dma_cfg->pblx8 = true;
/* TODO: AXI */
+
+ return 0;
}
-static int quark_default_data(struct plat_stmmacenet_data *plat,
- struct stmmac_pci_info *info)
+static const struct stmmac_pci_info stmmac_pci_info = {
+ .setup = stmmac_default_data,
+};
+
+static const struct stmmac_pci_func_data galileo_stmmac_func_data[] = {
+ {
+ .func = 6,
+ .phy_addr = 1,
+ },
+};
+
+static const struct stmmac_pci_dmi_data galileo_stmmac_dmi_data = {
+ .func = galileo_stmmac_func_data,
+ .nfuncs = ARRAY_SIZE(galileo_stmmac_func_data),
+};
+
+static const struct stmmac_pci_func_data iot2040_stmmac_func_data[] = {
+ {
+ .func = 6,
+ .phy_addr = 1,
+ },
+ {
+ .func = 7,
+ .phy_addr = 1,
+ },
+};
+
+static const struct stmmac_pci_dmi_data iot2040_stmmac_dmi_data = {
+ .func = iot2040_stmmac_func_data,
+ .nfuncs = ARRAY_SIZE(iot2040_stmmac_func_data),
+};
+
+static const struct dmi_system_id quark_pci_dmi[] = {
+ {
+ .matches = {
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "Galileo"),
+ },
+ .driver_data = (void *)&galileo_stmmac_dmi_data,
+ },
+ {
+ .matches = {
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "GalileoGen2"),
+ },
+ .driver_data = (void *)&galileo_stmmac_dmi_data,
+ },
+ {
+ .matches = {
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"),
+ DMI_EXACT_MATCH(DMI_BOARD_ASSET_TAG,
+ "6ES7647-0AA00-0YA2"),
+ },
+ .driver_data = (void *)&galileo_stmmac_dmi_data,
+ },
+ {
+ .matches = {
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"),
+ DMI_EXACT_MATCH(DMI_BOARD_ASSET_TAG,
+ "6ES7647-0AA00-1YA2"),
+ },
+ .driver_data = (void *)&iot2040_stmmac_dmi_data,
+ },
+ {}
+};
+
+static int quark_default_data(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat)
{
- struct pci_dev *pdev = info->pdev;
int ret;
/* Set common default data first */
@@ -127,9 +190,19 @@ static int quark_default_data(struct plat_stmmacenet_data *plat,
* Refuse to load the driver and register net device if MAC controller
* does not connect to any PHY interface.
*/
- ret = stmmac_pci_find_phy_addr(info);
- if (ret < 0)
- return ret;
+ ret = stmmac_pci_find_phy_addr(pdev, quark_pci_dmi);
+ if (ret < 0) {
+ /* Return error to the caller on DMI enabled boards. */
+ if (dmi_get_system_info(DMI_BOARD_NAME))
+ return ret;
+
+ /*
+ * Galileo boards with old firmware don't support DMI. We always
+ * use 1 here as PHY address, so at least the first found MAC
+ * controller would be probed.
+ */
+ ret = 1;
+ }
plat->bus_id = PCI_DEVID(pdev->bus->number, pdev->devfn);
plat->phy_addr = ret;
@@ -143,41 +216,8 @@ static int quark_default_data(struct plat_stmmacenet_data *plat,
return 0;
}
-static struct stmmac_pci_dmi_data quark_pci_dmi_data[] = {
- {
- .name = "Galileo",
- .func = 6,
- .phy_addr = 1,
- },
- {
- .name = "GalileoGen2",
- .func = 6,
- .phy_addr = 1,
- },
- {
- .name = "SIMATIC IOT2000",
- .asset_tag = "6ES7647-0AA00-0YA2",
- .func = 6,
- .phy_addr = 1,
- },
- {
- .name = "SIMATIC IOT2000",
- .asset_tag = "6ES7647-0AA00-1YA2",
- .func = 6,
- .phy_addr = 1,
- },
- {
- .name = "SIMATIC IOT2000",
- .asset_tag = "6ES7647-0AA00-1YA2",
- .func = 7,
- .phy_addr = 1,
- },
- {}
-};
-
-static struct stmmac_pci_info quark_pci_info = {
+static const struct stmmac_pci_info quark_pci_info = {
.setup = quark_default_data,
- .dmi = quark_pci_dmi_data,
};
/**
@@ -236,15 +276,9 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
pci_set_master(pdev);
- if (info) {
- info->pdev = pdev;
- if (info->setup) {
- ret = info->setup(plat, info);
- if (ret)
- return ret;
- }
- } else
- stmmac_default_data(plat);
+ ret = info->setup(pdev, plat);
+ if (ret)
+ return ret;
pci_enable_msi(pdev);
@@ -270,14 +304,21 @@ static void stmmac_pci_remove(struct pci_dev *pdev)
static SIMPLE_DEV_PM_OPS(stmmac_pm_ops, stmmac_suspend, stmmac_resume);
-#define STMMAC_VENDOR_ID 0x700
+/* synthetic ID, no official vendor */
+#define PCI_VENDOR_ID_STMMAC 0x700
+
#define STMMAC_QUARK_ID 0x0937
#define STMMAC_DEVICE_ID 0x1108
+#define STMMAC_DEVICE(vendor_id, dev_id, info) { \
+ PCI_VDEVICE(vendor_id, dev_id), \
+ .driver_data = (kernel_ulong_t)&info \
+ }
+
static const struct pci_device_id stmmac_id_table[] = {
- {PCI_DEVICE(STMMAC_VENDOR_ID, STMMAC_DEVICE_ID)},
- {PCI_DEVICE(PCI_VENDOR_ID_STMICRO, PCI_DEVICE_ID_STMICRO_MAC)},
- {PCI_VDEVICE(INTEL, STMMAC_QUARK_ID), (kernel_ulong_t)&quark_pci_info},
+ STMMAC_DEVICE(STMMAC, STMMAC_DEVICE_ID, stmmac_pci_info),
+ STMMAC_DEVICE(STMICRO, PCI_DEVICE_ID_STMICRO_MAC, stmmac_pci_info),
+ STMMAC_DEVICE(INTEL, STMMAC_QUARK_ID, quark_pci_info),
{}
};
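The stmmac_pci.c rework above moves board matching onto the generic DMI table infrastructure: a dmi_system_id array is searched with dmi_first_match(), and the per-board quirk data hangs off .driver_data. A minimal sketch of the idiom follows; the board name and quirk struct are made up for illustration.

#include <linux/dmi.h>

struct board_quirk {
	int phy_addr;		/* PHY address to use on this board */
};

static const struct board_quirk example_quirk = { .phy_addr = 1 };

static const struct dmi_system_id example_dmi_table[] = {
	{
		.matches = {
			DMI_EXACT_MATCH(DMI_BOARD_NAME, "ExampleBoard"),
		},
		.driver_data = (void *)&example_quirk,
	},
	{}	/* sentinel, required by dmi_first_match() */
};

static int example_phy_addr(void)
{
	const struct dmi_system_id *id = dmi_first_match(example_dmi_table);

	if (!id)
		return -ENODEV;	/* not one of the listed boards */

	return ((const struct board_quirk *)id->driver_data)->phy_addr;
}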
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 7fc3a1ef395a..a366b3747eeb 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -309,6 +309,14 @@ static int stmmac_dt_phy(struct plat_stmmacenet_data *plat,
struct device_node *np, struct device *dev)
{
bool mdio = true;
+ static const struct of_device_id need_mdio_ids[] = {
+ { .compatible = "snps,dwc-qos-ethernet-4.10" },
+ { .compatible = "allwinner,sun8i-a83t-emac" },
+ { .compatible = "allwinner,sun8i-h3-emac" },
+ { .compatible = "allwinner,sun8i-v3s-emac" },
+ { .compatible = "allwinner,sun50i-a64-emac" },
+ { /* sentinel; of_match_node() stops at the empty entry */ },
+ };
/* If phy-handle property is passed from DT, use it as the PHY */
plat->phy_node = of_parse_phandle(np, "phy-handle", 0);
@@ -325,8 +332,7 @@ static int stmmac_dt_phy(struct plat_stmmacenet_data *plat,
mdio = false;
}
- /* exception for dwmac-dwc-qos-eth glue logic */
- if (of_device_is_compatible(np, "snps,dwc-qos-ethernet-4.10")) {
+ if (of_match_node(need_mdio_ids, np)) {
plat->mdio_node = of_get_child_by_name(np, "mdio");
} else {
/**
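of_match_node() against a local of_device_id table, as used above, is the standard way to test one device node against several compatible strings at once. A minimal sketch of the idiom, with a hypothetical compatible:

#include <linux/of.h>

/* True when np carries the (hypothetical) compatible below. */
static bool example_needs_mdio(struct device_node *np)
{
	static const struct of_device_id ids[] = {
		{ .compatible = "vendor,example-emac" },
		{} /* sentinel */
	};

	return of_match_node(ids, np) != NULL;
}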
diff --git a/drivers/net/ethernet/sun/ldmvsw.c b/drivers/net/ethernet/sun/ldmvsw.c
index 5b56c24b6ed2..8603e397097e 100644
--- a/drivers/net/ethernet/sun/ldmvsw.c
+++ b/drivers/net/ethernet/sun/ldmvsw.c
@@ -248,7 +248,7 @@ static struct net_device *vsw_alloc_netdev(u8 hwaddr[],
dev->ethtool_ops = &vsw_ethtool_ops;
dev->watchdog_timeo = VSW_TX_TIMEOUT;
- dev->hw_features = NETIF_F_HW_CSUM | NETIF_F_SG;
+ dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG;
dev->features = dev->hw_features;
/* MTU range: 68 - 65535 */
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 2dcca249eb9c..46cb7f8955a2 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -6667,7 +6667,7 @@ static netdev_tx_t niu_start_xmit(struct sk_buff *skb,
headroom = align + sizeof(struct tx_pkt_hdr);
ehdr = (struct ethhdr *) skb->data;
- tp = (struct tx_pkt_hdr *) skb_push(skb, headroom);
+ tp = skb_push(skb, headroom);
len = skb->len - sizeof(struct tx_pkt_hdr);
tp->flags = cpu_to_le64(niu_compute_tx_flags(skb, ehdr, align, len));
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index 0b95105f7060..75b167e3fe98 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -312,7 +312,7 @@ static struct vnet *vnet_new(const u64 *local_mac,
dev->watchdog_timeo = VNET_TX_TIMEOUT;
dev->hw_features = NETIF_F_TSO | NETIF_F_GSO | NETIF_F_GSO_SOFTWARE |
- NETIF_F_HW_CSUM | NETIF_F_SG;
+ NETIF_F_IP_CSUM | NETIF_F_SG;
dev->features = dev->hw_features;
/* MTU range: 68 - 65535 */
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index f4d7aec50479..1850e348f555 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -1236,6 +1236,7 @@ static inline int cpsw_tx_packet_submit(struct cpsw_priv *priv,
{
struct cpsw_common *cpsw = priv->cpsw;
+ skb_tx_timestamp(skb);
return cpdma_chan_submit(txch, skb, skb->data, skb->len,
priv->emac_port + cpsw->data.dual_emac);
}
@@ -1597,6 +1598,7 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
{
struct cpsw_priv *priv = netdev_priv(ndev);
struct cpsw_common *cpsw = priv->cpsw;
+ struct cpts *cpts = cpsw->cpts;
struct netdev_queue *txq;
struct cpdma_chan *txch;
int ret, q_idx;
@@ -1608,11 +1610,9 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
}
if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
- cpts_is_tx_enabled(cpsw->cpts))
+ cpts_is_tx_enabled(cpts) && cpts_can_timestamp(cpts, skb))
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
- skb_tx_timestamp(skb);
-
q_idx = skb_get_queue_mapping(skb);
if (q_idx >= cpsw->tx_ch_num)
q_idx = q_idx % cpsw->tx_ch_num;
@@ -1731,10 +1731,14 @@ static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
cpts_rx_enable(cpts, 0);
break;
case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_NTP_ALL:
+ return -ERANGE;
case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
- return -ERANGE;
+ cpts_rx_enable(cpts, HWTSTAMP_FILTER_PTP_V1_L4_EVENT);
+ cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
+ break;
case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
@@ -1744,7 +1748,7 @@ static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
- cpts_rx_enable(cpts, 1);
+ cpts_rx_enable(cpts, HWTSTAMP_FILTER_PTP_V2_EVENT);
cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
break;
default:
@@ -1783,7 +1787,7 @@ static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
cfg.tx_type = cpts_is_tx_enabled(cpts) ?
HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
cfg.rx_filter = (cpts_is_rx_enabled(cpts) ?
- HWTSTAMP_FILTER_PTP_V2_EVENT : HWTSTAMP_FILTER_NONE);
+ cpts->rx_enable : HWTSTAMP_FILTER_NONE);
return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}
@@ -2140,6 +2144,7 @@ static int cpsw_get_ts_info(struct net_device *ndev,
(1 << HWTSTAMP_TX_ON);
info->rx_filters =
(1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
(1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
return 0;
}
@@ -2165,11 +2170,11 @@ static int cpsw_get_link_ksettings(struct net_device *ndev,
struct cpsw_common *cpsw = priv->cpsw;
int slave_no = cpsw_slave_index(cpsw, priv);
- if (cpsw->slaves[slave_no].phy)
- return phy_ethtool_ksettings_get(cpsw->slaves[slave_no].phy,
- ecmd);
- else
+ if (!cpsw->slaves[slave_no].phy)
return -EOPNOTSUPP;
+
+ phy_ethtool_ksettings_get(cpsw->slaves[slave_no].phy, ecmd);
+ return 0;
}
static int cpsw_set_link_ksettings(struct net_device *ndev,
diff --git a/drivers/net/ethernet/ti/cpts.h b/drivers/net/ethernet/ti/cpts.h
index c96eca2b1b46..01ea82ba9cdc 100644
--- a/drivers/net/ethernet/ti/cpts.h
+++ b/drivers/net/ethernet/ti/cpts.h
@@ -30,6 +30,7 @@
#include <linux/of.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/skbuff.h>
+#include <linux/ptp_classify.h>
#include <linux/timecounter.h>
struct cpsw_cpts {
@@ -155,6 +156,16 @@ static inline bool cpts_is_tx_enabled(struct cpts *cpts)
return !!cpts->tx_enable;
}
+static inline bool cpts_can_timestamp(struct cpts *cpts, struct sk_buff *skb)
+{
+ unsigned int class = ptp_classify_raw(skb);
+
+ if (class == PTP_CLASS_NONE)
+ return false;
+
+ return true;
+}
+
#else
struct cpts;
@@ -203,6 +214,11 @@ static inline bool cpts_is_tx_enabled(struct cpts *cpts)
{
return false;
}
+
+static inline bool cpts_can_timestamp(struct cpts *cpts, struct sk_buff *skb)
+{
+ return false;
+}
#endif
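The new cpts_can_timestamp() helper boils down to a single PTP classifier call, which is what lets the netcp_ethss.c hunk further down replace its open-coded switch over PTP classes. As a design note, the check collapses to a one-liner; a minimal equivalent sketch, assuming only the headers already included above:

#include <linux/ptp_classify.h>
#include <linux/skbuff.h>

/* True when the skb parses as any recognised PTP event frame. */
static inline bool example_can_timestamp(struct sk_buff *skb)
{
	return ptp_classify_raw(skb) != PTP_CLASS_NONE;
}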
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index 7ecc6b70e7e8..e4d6edf387b3 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -645,7 +645,7 @@ EXPORT_SYMBOL_GPL(cpdma_ctlr_destroy);
int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable)
{
unsigned long flags;
- int i, reg;
+ int i;
spin_lock_irqsave(&ctlr->lock, flags);
if (ctlr->state != CPDMA_STATE_ACTIVE) {
@@ -653,9 +653,6 @@ int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable)
return -EINVAL;
}
- reg = enable ? CPDMA_DMAINTMASKSET : CPDMA_DMAINTMASKCLEAR;
- dma_reg_write(ctlr, reg, CPDMA_DMAINT_HOSTERR);
-
for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
if (ctlr->channels[i])
cpdma_chan_int_ctrl(ctlr->channels[i], enable);
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index e6222e535019..9d52c3a78621 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -1877,8 +1877,8 @@ static u16 netcp_select_queue(struct net_device *dev, struct sk_buff *skb,
return 0;
}
-static int netcp_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
- struct tc_to_netdev *tc)
+static int netcp_setup_tc(struct net_device *dev, u32 handle, u32 chain_index,
+ __be16 proto, struct tc_to_netdev *tc)
{
u8 num_tc;
int i;
diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c
index dd92950a4615..28cb38af1a34 100644
--- a/drivers/net/ethernet/ti/netcp_ethss.c
+++ b/drivers/net/ethernet/ti/netcp_ethss.c
@@ -1927,7 +1927,6 @@ static int keystone_get_link_ksettings(struct net_device *ndev,
struct netcp_intf *netcp = netdev_priv(ndev);
struct phy_device *phy = ndev->phydev;
struct gbe_intf *gbe_intf;
- int ret;
if (!phy)
return -EINVAL;
@@ -1939,11 +1938,10 @@ static int keystone_get_link_ksettings(struct net_device *ndev,
if (!gbe_intf->slave)
return -EINVAL;
- ret = phy_ethtool_ksettings_get(phy, cmd);
- if (!ret)
- cmd->base.port = gbe_intf->slave->phy_port_t;
+ phy_ethtool_ksettings_get(phy, cmd);
+ cmd->base.port = gbe_intf->slave->phy_port_t;
- return ret;
+ return 0;
}
static int keystone_set_link_ksettings(struct net_device *ndev,
@@ -2505,24 +2503,8 @@ static bool gbe_need_txtstamp(struct gbe_intf *gbe_intf,
const struct netcp_packet *p_info)
{
struct sk_buff *skb = p_info->skb;
- unsigned int class = ptp_classify_raw(skb);
-
- if (class == PTP_CLASS_NONE)
- return false;
-
- switch (class) {
- case PTP_CLASS_V1_IPV4:
- case PTP_CLASS_V1_IPV6:
- case PTP_CLASS_V2_IPV4:
- case PTP_CLASS_V2_IPV6:
- case PTP_CLASS_V2_L2:
- case (PTP_CLASS_V2_VLAN | PTP_CLASS_L2):
- case (PTP_CLASS_V2_VLAN | PTP_CLASS_IPV4):
- case (PTP_CLASS_V2_VLAN | PTP_CLASS_IPV6):
- return true;
- }
- return false;
+ return cpts_can_timestamp(gbe_intf->gbe_dev->cpts, skb);
}
static int gbe_txtstamp_mark_pkt(struct gbe_intf *gbe_intf,
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c
index 7c634bc75615..aec95382ea5c 100644
--- a/drivers/net/ethernet/tile/tilegx.c
+++ b/drivers/net/ethernet/tile/tilegx.c
@@ -512,6 +512,7 @@ static int tile_hwtstamp_set(struct net_device *dev, struct ifreq *rq)
case HWTSTAMP_FILTER_PTP_V2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ case HWTSTAMP_FILTER_NTP_ALL:
config.rx_filter = HWTSTAMP_FILTER_ALL;
break;
default:
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
index fa6a06571187..88d74aef218a 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
@@ -754,7 +754,7 @@ static struct sk_buff *gelic_put_vlan_tag(struct sk_buff *skb,
return NULL;
dev_kfree_skb_any(sk_tmp);
}
- veth = (struct vlan_ethhdr *)skb_push(skb, VLAN_HLEN);
+ veth = skb_push(skb, VLAN_HLEN);
/* Move the mac addresses to the top of buffer */
memmove(skb->data, skb->data + VLAN_HLEN, 2 * ETH_ALEN);
diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c
index 5ac6eaa9e785..c2d15d9c0c33 100644
--- a/drivers/net/ethernet/tundra/tsi108_eth.c
+++ b/drivers/net/ethernet/tundra/tsi108_eth.c
@@ -1504,13 +1504,12 @@ static int tsi108_get_link_ksettings(struct net_device *dev,
{
struct tsi108_prv_data *data = netdev_priv(dev);
unsigned long flags;
- int rc;
spin_lock_irqsave(&data->txlock, flags);
- rc = mii_ethtool_get_link_ksettings(&data->mii_if, cmd);
+ mii_ethtool_get_link_ksettings(&data->mii_if, cmd);
spin_unlock_irqrestore(&data->txlock, flags);
- return rc;
+ return 0;
}
static int tsi108_set_link_ksettings(struct net_device *dev,
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index 4cf41f779d0e..acd29d60174a 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -2307,13 +2307,12 @@ static int netdev_get_link_ksettings(struct net_device *dev,
struct ethtool_link_ksettings *cmd)
{
struct rhine_private *rp = netdev_priv(dev);
- int rc;
mutex_lock(&rp->task_lock);
- rc = mii_ethtool_get_link_ksettings(&rp->mii_if, cmd);
+ mii_ethtool_get_link_ksettings(&rp->mii_if, cmd);
mutex_unlock(&rp->task_lock);
- return rc;
+ return 0;
}
static int netdev_set_link_ksettings(struct net_device *dev,